applied-ai-018 commited on
Commit
51713a2
·
verified ·
1 Parent(s): dae9859

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__init__.py +102 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py +142 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py +243 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py +33 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py +338 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py +551 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py +667 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__init__.py +64 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/configuration_kosmos2.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/modeling_kosmos2.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/processing_kosmos2.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/configuration_kosmos2.py +295 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py +77 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/modeling_kosmos2.py +2054 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/processing_kosmos2.py +666 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__init__.py +114 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/convert_llama_weights_to_hf.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama_fast.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/configuration_llama.py +191 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py +339 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/modeling_flax_llama.py +749 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py +1566 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py +471 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py +281 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/configuration_mt5.py +173 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/modeling_flax_mt5.py +120 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/modeling_mt5.py +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/modeling_tf_mt5.py +95 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/configuration_siglip.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/convert_siglip_to_hf.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/modeling_siglip.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/processing_siglip.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/tokenization_siglip.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__init__.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tf_available,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
27
+ }
28
+
29
+ try:
30
+ if not is_vision_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
36
+ _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_convnext"] = [
45
+ "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "ConvNextForImageClassification",
47
+ "ConvNextModel",
48
+ "ConvNextPreTrainedModel",
49
+ "ConvNextBackbone",
50
+ ]
51
+
52
+ try:
53
+ if not is_tf_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ _import_structure["modeling_tf_convnext"] = [
59
+ "TFConvNextForImageClassification",
60
+ "TFConvNextModel",
61
+ "TFConvNextPreTrainedModel",
62
+ ]
63
+
64
+ if TYPE_CHECKING:
65
+ from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
66
+
67
+ try:
68
+ if not is_vision_available():
69
+ raise OptionalDependencyNotAvailable()
70
+ except OptionalDependencyNotAvailable:
71
+ pass
72
+ else:
73
+ from .feature_extraction_convnext import ConvNextFeatureExtractor
74
+ from .image_processing_convnext import ConvNextImageProcessor
75
+
76
+ try:
77
+ if not is_torch_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .modeling_convnext import (
83
+ CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
84
+ ConvNextBackbone,
85
+ ConvNextForImageClassification,
86
+ ConvNextModel,
87
+ ConvNextPreTrainedModel,
88
+ )
89
+
90
+ try:
91
+ if not is_tf_available():
92
+ raise OptionalDependencyNotAvailable()
93
+ except OptionalDependencyNotAvailable:
94
+ pass
95
+ else:
96
+ from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
97
+
98
+
99
+ else:
100
+ import sys
101
+
102
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc ADDED
Binary file (6.03 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc ADDED
Binary file (7.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/feature_extraction_convnext.cpython-310.pyc ADDED
Binary file (1.04 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/image_processing_convnext.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_convnext.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/modeling_tf_convnext.cpython-310.pyc ADDED
Binary file (22.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ConvNeXT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
35
+ r"""
36
+ This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate an
37
+ ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
38
+ with the defaults will yield a similar configuration to that of the ConvNeXT
39
+ [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+ Args:
45
+ num_channels (`int`, *optional*, defaults to 3):
46
+ The number of input channels.
47
+ patch_size (`int`, optional, defaults to 4):
48
+ Patch size to use in the patch embedding layer.
49
+ num_stages (`int`, optional, defaults to 4):
50
+ The number of stages in the model.
51
+ hidden_sizes (`List[int]`, *optional*, defaults to [96, 192, 384, 768]):
52
+ Dimensionality (hidden size) at each stage.
53
+ depths (`List[int]`, *optional*, defaults to [3, 3, 9, 3]):
54
+ Depth (number of blocks) for each stage.
55
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
57
+ `"selu"` and `"gelu_new"` are supported.
58
+ initializer_range (`float`, *optional*, defaults to 0.02):
59
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
60
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
61
+ The epsilon used by the layer normalization layers.
62
+ layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
63
+ The initial value for the layer scale.
64
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
65
+ The drop rate for stochastic depth.
66
+ out_features (`List[str]`, *optional*):
67
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
68
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
69
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
70
+ same order as defined in the `stage_names` attribute.
71
+ out_indices (`List[int]`, *optional*):
72
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
73
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
74
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
75
+ same order as defined in the `stage_names` attribute.
76
+
77
+ Example:
78
+ ```python
79
+ >>> from transformers import ConvNextConfig, ConvNextModel
80
+
81
+ >>> # Initializing a ConvNext convnext-tiny-224 style configuration
82
+ >>> configuration = ConvNextConfig()
83
+
84
+ >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
85
+ >>> model = ConvNextModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "convnext"
92
+
93
+ def __init__(
94
+ self,
95
+ num_channels=3,
96
+ patch_size=4,
97
+ num_stages=4,
98
+ hidden_sizes=None,
99
+ depths=None,
100
+ hidden_act="gelu",
101
+ initializer_range=0.02,
102
+ layer_norm_eps=1e-12,
103
+ layer_scale_init_value=1e-6,
104
+ drop_path_rate=0.0,
105
+ image_size=224,
106
+ out_features=None,
107
+ out_indices=None,
108
+ **kwargs,
109
+ ):
110
+ super().__init__(**kwargs)
111
+
112
+ self.num_channels = num_channels
113
+ self.patch_size = patch_size
114
+ self.num_stages = num_stages
115
+ self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
116
+ self.depths = [3, 3, 9, 3] if depths is None else depths
117
+ self.hidden_act = hidden_act
118
+ self.initializer_range = initializer_range
119
+ self.layer_norm_eps = layer_norm_eps
120
+ self.layer_scale_init_value = layer_scale_init_value
121
+ self.drop_path_rate = drop_path_rate
122
+ self.image_size = image_size
123
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
124
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
125
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
126
+ )
127
+
128
+
129
+ class ConvNextOnnxConfig(OnnxConfig):
130
+ torch_onnx_minimum_version = version.parse("1.11")
131
+
132
+ @property
133
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
134
+ return OrderedDict(
135
+ [
136
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
137
+ ]
138
+ )
139
+
140
+ @property
141
+ def atol_for_validation(self) -> float:
142
+ return 1e-5
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ConvNext checkpoints from the original repository.
16
+
17
+ URL: https://github.com/facebookresearch/ConvNeXt"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import hf_hub_download
27
+ from PIL import Image
28
+
29
+ from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def get_convnext_config(checkpoint_url):
38
+ config = ConvNextConfig()
39
+
40
+ if "tiny" in checkpoint_url:
41
+ depths = [3, 3, 9, 3]
42
+ hidden_sizes = [96, 192, 384, 768]
43
+ if "small" in checkpoint_url:
44
+ depths = [3, 3, 27, 3]
45
+ hidden_sizes = [96, 192, 384, 768]
46
+ if "base" in checkpoint_url:
47
+ depths = [3, 3, 27, 3]
48
+ hidden_sizes = [128, 256, 512, 1024]
49
+ if "large" in checkpoint_url:
50
+ depths = [3, 3, 27, 3]
51
+ hidden_sizes = [192, 384, 768, 1536]
52
+ if "xlarge" in checkpoint_url:
53
+ depths = [3, 3, 27, 3]
54
+ hidden_sizes = [256, 512, 1024, 2048]
55
+
56
+ if "1k" in checkpoint_url:
57
+ num_labels = 1000
58
+ filename = "imagenet-1k-id2label.json"
59
+ expected_shape = (1, 1000)
60
+ else:
61
+ num_labels = 21841
62
+ filename = "imagenet-22k-id2label.json"
63
+ expected_shape = (1, 21841)
64
+
65
+ repo_id = "huggingface/label-files"
66
+ config.num_labels = num_labels
67
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
68
+ id2label = {int(k): v for k, v in id2label.items()}
69
+ if "1k" not in checkpoint_url:
70
+ # this dataset contains 21843 labels but the model only has 21841
71
+ # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
72
+ del id2label[9205]
73
+ del id2label[15027]
74
+ config.id2label = id2label
75
+ config.label2id = {v: k for k, v in id2label.items()}
76
+ config.hidden_sizes = hidden_sizes
77
+ config.depths = depths
78
+
79
+ return config, expected_shape
80
+
81
+
82
+ def rename_key(name):
83
+ if "downsample_layers.0.0" in name:
84
+ name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
85
+ if "downsample_layers.0.1" in name:
86
+ name = name.replace("downsample_layers.0.1", "embeddings.norm") # we rename to layernorm later on
87
+ if "downsample_layers.1.0" in name:
88
+ name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
89
+ if "downsample_layers.1.1" in name:
90
+ name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
91
+ if "downsample_layers.2.0" in name:
92
+ name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
93
+ if "downsample_layers.2.1" in name:
94
+ name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
95
+ if "downsample_layers.3.0" in name:
96
+ name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
97
+ if "downsample_layers.3.1" in name:
98
+ name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
99
+ if "stages" in name and "downsampling_layer" not in name:
100
+ # stages.0.0. for instance should be renamed to stages.0.layers.0.
101
+ name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
102
+ if "stages" in name:
103
+ name = name.replace("stages", "encoder.stages")
104
+ if "norm" in name:
105
+ name = name.replace("norm", "layernorm")
106
+ if "gamma" in name:
107
+ name = name.replace("gamma", "layer_scale_parameter")
108
+ if "head" in name:
109
+ name = name.replace("head", "classifier")
110
+
111
+ return name
112
+
113
+
114
+ # We will verify our results on an image of cute cats
115
+ def prepare_img():
116
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
117
+ im = Image.open(requests.get(url, stream=True).raw)
118
+ return im
119
+
120
+
121
+ @torch.no_grad()
122
+ def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path):
123
+ """
124
+ Copy/paste/tweak model's weights to our ConvNext structure.
125
+ """
126
+
127
+ # define ConvNext configuration based on URL
128
+ config, expected_shape = get_convnext_config(checkpoint_url)
129
+ # load original state_dict from URL
130
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
131
+ # rename keys
132
+ for key in state_dict.copy().keys():
133
+ val = state_dict.pop(key)
134
+ state_dict[rename_key(key)] = val
135
+ # add prefix to all keys expect classifier head
136
+ for key in state_dict.copy().keys():
137
+ val = state_dict.pop(key)
138
+ if not key.startswith("classifier"):
139
+ key = "convnext." + key
140
+ state_dict[key] = val
141
+
142
+ # load HuggingFace model
143
+ model = ConvNextForImageClassification(config)
144
+ model.load_state_dict(state_dict)
145
+ model.eval()
146
+
147
+ # Check outputs on an image, prepared by ConvNextImageProcessor
148
+ size = 224 if "224" in checkpoint_url else 384
149
+ image_processor = ConvNextImageProcessor(size=size)
150
+ pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
151
+
152
+ logits = model(pixel_values).logits
153
+
154
+ # note: the logits below were obtained without center cropping
155
+ if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth":
156
+ expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918])
157
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth":
158
+ expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365])
159
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth":
160
+ expected_logits = torch.tensor([0.4525, 0.7539, 0.0308])
161
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth":
162
+ expected_logits = torch.tensor([0.3561, 0.6350, -0.0384])
163
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth":
164
+ expected_logits = torch.tensor([0.4174, -0.0989, 0.1489])
165
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth":
166
+ expected_logits = torch.tensor([0.2513, -0.1349, -0.1613])
167
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth":
168
+ expected_logits = torch.tensor([1.2980, 0.3631, -0.1198])
169
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth":
170
+ expected_logits = torch.tensor([1.2963, 0.1227, 0.1723])
171
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth":
172
+ expected_logits = torch.tensor([1.7956, 0.8390, 0.2820])
173
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth":
174
+ expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878])
175
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth":
176
+ expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348])
177
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth":
178
+ expected_logits = torch.tensor([0.2681, 0.2365, 0.6246])
179
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth":
180
+ expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116])
181
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth":
182
+ expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379])
183
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth":
184
+ expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444])
185
+ else:
186
+ raise ValueError(f"Unknown URL: {checkpoint_url}")
187
+
188
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3)
189
+ assert logits.shape == expected_shape
190
+
191
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
192
+ print(f"Saving model to {pytorch_dump_folder_path}")
193
+ model.save_pretrained(pytorch_dump_folder_path)
194
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
195
+ image_processor.save_pretrained(pytorch_dump_folder_path)
196
+
197
+ print("Pushing model to the hub...")
198
+ model_name = "convnext"
199
+ if "tiny" in checkpoint_url:
200
+ model_name += "-tiny"
201
+ elif "small" in checkpoint_url:
202
+ model_name += "-small"
203
+ elif "base" in checkpoint_url:
204
+ model_name += "-base"
205
+ elif "xlarge" in checkpoint_url:
206
+ model_name += "-xlarge"
207
+ elif "large" in checkpoint_url:
208
+ model_name += "-large"
209
+ if "224" in checkpoint_url:
210
+ model_name += "-224"
211
+ elif "384" in checkpoint_url:
212
+ model_name += "-384"
213
+ if "22k" in checkpoint_url and "1k" not in checkpoint_url:
214
+ model_name += "-22k"
215
+ if "22k" in checkpoint_url and "1k" in checkpoint_url:
216
+ model_name += "-22k-1k"
217
+
218
+ model.push_to_hub(
219
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
220
+ organization="nielsr",
221
+ commit_message="Add model",
222
+ )
223
+
224
+
225
+ if __name__ == "__main__":
226
+ parser = argparse.ArgumentParser()
227
+ # Required parameters
228
+ parser.add_argument(
229
+ "--checkpoint_url",
230
+ default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
231
+ type=str,
232
+ help="URL of the original ConvNeXT checkpoint you'd like to convert.",
233
+ )
234
+ parser.add_argument(
235
+ "--pytorch_dump_folder_path",
236
+ default=None,
237
+ type=str,
238
+ required=True,
239
+ help="Path to the output PyTorch model directory.",
240
+ )
241
+
242
+ args = parser.parse_args()
243
+ convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for ConvNeXT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_convnext import ConvNextImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class ConvNextFeatureExtractor(ConvNextImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use ConvNextImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for ConvNeXT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ center_crop,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ IMAGENET_STANDARD_MEAN,
30
+ IMAGENET_STANDARD_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ validate_kwargs,
40
+ validate_preprocess_arguments,
41
+ )
42
+ from ...utils import TensorType, is_vision_available, logging
43
+
44
+
45
+ if is_vision_available():
46
+ import PIL
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ class ConvNextImageProcessor(BaseImageProcessor):
53
+ r"""
54
+ Constructs a ConvNeXT image processor.
55
+
56
+ Args:
57
+ do_resize (`bool`, *optional*, defaults to `True`):
58
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overriden
59
+ by `do_resize` in the `preprocess` method.
60
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
61
+ Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
62
+ resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
63
+ be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to
64
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
65
+ be overriden by `size` in the `preprocess` method.
66
+ crop_pct (`float` *optional*, defaults to 224 / 256):
67
+ Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
68
+ overriden by `crop_pct` in the `preprocess` method.
69
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
70
+ Resampling filter to use if resizing the image. Can be overriden by `resample` in the `preprocess` method.
71
+ do_rescale (`bool`, *optional*, defaults to `True`):
72
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overriden by `do_rescale` in
73
+ the `preprocess` method.
74
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
75
+ Scale factor to use if rescaling the image. Can be overriden by `rescale_factor` in the `preprocess`
76
+ method.
77
+ do_normalize (`bool`, *optional*, defaults to `True`):
78
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
79
+ method.
80
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
81
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
82
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
83
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
84
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
85
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
86
+ """
87
+
88
+ model_input_names = ["pixel_values"]
89
+
90
+ def __init__(
91
+ self,
92
+ do_resize: bool = True,
93
+ size: Dict[str, int] = None,
94
+ crop_pct: float = None,
95
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
96
+ do_rescale: bool = True,
97
+ rescale_factor: Union[int, float] = 1 / 255,
98
+ do_normalize: bool = True,
99
+ image_mean: Optional[Union[float, List[float]]] = None,
100
+ image_std: Optional[Union[float, List[float]]] = None,
101
+ **kwargs,
102
+ ) -> None:
103
+ super().__init__(**kwargs)
104
+ size = size if size is not None else {"shortest_edge": 384}
105
+ size = get_size_dict(size, default_to_square=False)
106
+
107
+ self.do_resize = do_resize
108
+ self.size = size
109
+ # Default value set here for backwards compatibility where the value in config is None
110
+ self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
111
+ self.resample = resample
112
+ self.do_rescale = do_rescale
113
+ self.rescale_factor = rescale_factor
114
+ self.do_normalize = do_normalize
115
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
116
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
117
+ self._valid_processor_keys = [
118
+ "images",
119
+ "do_resize",
120
+ "size",
121
+ "crop_pct",
122
+ "resample",
123
+ "do_rescale",
124
+ "rescale_factor",
125
+ "do_normalize",
126
+ "image_mean",
127
+ "image_std",
128
+ "return_tensors",
129
+ "data_format",
130
+ "input_data_format",
131
+ ]
132
+
133
+ def resize(
134
+ self,
135
+ image: np.ndarray,
136
+ size: Dict[str, int],
137
+ crop_pct: float,
138
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
139
+ data_format: Optional[Union[str, ChannelDimension]] = None,
140
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
141
+ **kwargs,
142
+ ) -> np.ndarray:
143
+ """
144
+ Resize an image.
145
+
146
+ Args:
147
+ image (`np.ndarray`):
148
+ Image to resize.
149
+ size (`Dict[str, int]`):
150
+ Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
151
+ `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
152
+ Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
153
+ after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
154
+ crop_pct (`float`):
155
+ Percentage of the image to crop. Only has an effect if size < 384.
156
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
157
+ Resampling filter to use when resizing the image.
158
+ data_format (`str` or `ChannelDimension`, *optional*):
159
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
160
+ input_data_format (`ChannelDimension` or `str`, *optional*):
161
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
162
+ image.
163
+ """
164
+ size = get_size_dict(size, default_to_square=False)
165
+ if "shortest_edge" not in size:
166
+ raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
167
+ shortest_edge = size["shortest_edge"]
168
+
169
+ if shortest_edge < 384:
170
+ # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
171
+ resize_shortest_edge = int(shortest_edge / crop_pct)
172
+ resize_size = get_resize_output_image_size(
173
+ image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format
174
+ )
175
+ image = resize(
176
+ image=image,
177
+ size=resize_size,
178
+ resample=resample,
179
+ data_format=data_format,
180
+ input_data_format=input_data_format,
181
+ **kwargs,
182
+ )
183
+ # then crop to (shortest_edge, shortest_edge)
184
+ return center_crop(
185
+ image=image,
186
+ size=(shortest_edge, shortest_edge),
187
+ data_format=data_format,
188
+ input_data_format=input_data_format,
189
+ **kwargs,
190
+ )
191
+ else:
192
+ # warping (no cropping) when evaluated at 384 or larger
193
+ return resize(
194
+ image,
195
+ size=(shortest_edge, shortest_edge),
196
+ resample=resample,
197
+ data_format=data_format,
198
+ input_data_format=input_data_format,
199
+ **kwargs,
200
+ )
201
+
202
+ def preprocess(
203
+ self,
204
+ images: ImageInput,
205
+ do_resize: bool = None,
206
+ size: Dict[str, int] = None,
207
+ crop_pct: float = None,
208
+ resample: PILImageResampling = None,
209
+ do_rescale: bool = None,
210
+ rescale_factor: float = None,
211
+ do_normalize: bool = None,
212
+ image_mean: Optional[Union[float, List[float]]] = None,
213
+ image_std: Optional[Union[float, List[float]]] = None,
214
+ return_tensors: Optional[Union[str, TensorType]] = None,
215
+ data_format: ChannelDimension = ChannelDimension.FIRST,
216
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
217
+ **kwargs,
218
+ ) -> PIL.Image.Image:
219
+ """
220
+ Preprocess an image or batch of images.
221
+
222
+ Args:
223
+ images (`ImageInput`):
224
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
225
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
226
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
227
+ Whether to resize the image.
228
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
229
+ Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
230
+ is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
231
+ image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
232
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
233
+ crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
234
+ Percentage of the image to crop if size < 384.
235
+ resample (`int`, *optional*, defaults to `self.resample`):
236
+ Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
237
+ has an effect if `do_resize` is set to `True`.
238
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
239
+ Whether to rescale the image values between [0 - 1].
240
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
241
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
242
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
243
+ Whether to normalize the image.
244
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
245
+ Image mean.
246
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
247
+ Image standard deviation.
248
+ return_tensors (`str` or `TensorType`, *optional*):
249
+ The type of tensors to return. Can be one of:
250
+ - Unset: Return a list of `np.ndarray`.
251
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
252
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
253
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
254
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
255
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
256
+ The channel dimension format for the output image. Can be one of:
257
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
258
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
259
+ - Unset: Use the channel dimension format of the input image.
260
+ input_data_format (`ChannelDimension` or `str`, *optional*):
261
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
262
+ from the input image. Can be one of:
263
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
264
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
265
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
266
+ """
267
+ do_resize = do_resize if do_resize is not None else self.do_resize
268
+ crop_pct = crop_pct if crop_pct is not None else self.crop_pct
269
+ resample = resample if resample is not None else self.resample
270
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
271
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
272
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
273
+ image_mean = image_mean if image_mean is not None else self.image_mean
274
+ image_std = image_std if image_std is not None else self.image_std
275
+
276
+ size = size if size is not None else self.size
277
+ size = get_size_dict(size, default_to_square=False)
278
+
279
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
280
+
281
+ images = make_list_of_images(images)
282
+
283
+ if not valid_images(images):
284
+ raise ValueError(
285
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
286
+ "torch.Tensor, tf.Tensor or jax.ndarray."
287
+ )
288
+
289
+ validate_preprocess_arguments(
290
+ do_rescale=do_rescale,
291
+ rescale_factor=rescale_factor,
292
+ do_normalize=do_normalize,
293
+ image_mean=image_mean,
294
+ image_std=image_std,
295
+ do_resize=do_resize,
296
+ size=size,
297
+ resample=resample,
298
+ )
299
+
300
+ # All transformations expect numpy arrays.
301
+ images = [to_numpy_array(image) for image in images]
302
+
303
+ if is_scaled_image(images[0]) and do_rescale:
304
+ logger.warning_once(
305
+ "It looks like you are trying to rescale already rescaled images. If the input"
306
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
307
+ )
308
+
309
+ if input_data_format is None:
310
+ # We assume that all images have the same channel dimension format.
311
+ input_data_format = infer_channel_dimension_format(images[0])
312
+
313
+ if do_resize:
314
+ images = [
315
+ self.resize(
316
+ image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
317
+ )
318
+ for image in images
319
+ ]
320
+
321
+ if do_rescale:
322
+ images = [
323
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
324
+ for image in images
325
+ ]
326
+
327
+ if do_normalize:
328
+ images = [
329
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
330
+ for image in images
331
+ ]
332
+
333
+ images = [
334
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
335
+ ]
336
+
337
+ data = {"pixel_values": images}
338
+ return BatchFeature(data=data, tensor_type=return_tensors)
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py ADDED
@@ -0,0 +1,551 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ConvNext model."""
16
+
17
+
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BackboneOutput,
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+ from ...utils.backbone_utils import BackboneMixin
41
+ from .configuration_convnext import ConvNextConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ # General docstring
47
+ _CONFIG_FOR_DOC = "ConvNextConfig"
48
+
49
+ # Base docstring
50
+ _CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
51
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
52
+
53
+ # Image classification docstring
54
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnext-tiny-224"
55
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
56
+
57
+
58
+ from ..deprecated._archive_maps import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ # Copied from transformers.models.beit.modeling_beit.drop_path
62
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
63
+ """
64
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
65
+
66
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
67
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
68
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
69
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
70
+ argument.
71
+ """
72
+ if drop_prob == 0.0 or not training:
73
+ return input
74
+ keep_prob = 1 - drop_prob
75
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
76
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
77
+ random_tensor.floor_() # binarize
78
+ output = input.div(keep_prob) * random_tensor
79
+ return output
80
+
81
+
82
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext
83
+ class ConvNextDropPath(nn.Module):
84
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
85
+
86
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
87
+ super().__init__()
88
+ self.drop_prob = drop_prob
89
+
90
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
91
+ return drop_path(hidden_states, self.drop_prob, self.training)
92
+
93
+ def extra_repr(self) -> str:
94
+ return "p={}".format(self.drop_prob)
95
+
96
+
97
+ class ConvNextLayerNorm(nn.Module):
98
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
99
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
100
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
101
+ """
102
+
103
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
104
+ super().__init__()
105
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
106
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
107
+ self.eps = eps
108
+ self.data_format = data_format
109
+ if self.data_format not in ["channels_last", "channels_first"]:
110
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
111
+ self.normalized_shape = (normalized_shape,)
112
+
113
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
114
+ if self.data_format == "channels_last":
115
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
116
+ elif self.data_format == "channels_first":
117
+ input_dtype = x.dtype
118
+ x = x.float()
119
+ u = x.mean(1, keepdim=True)
120
+ s = (x - u).pow(2).mean(1, keepdim=True)
121
+ x = (x - u) / torch.sqrt(s + self.eps)
122
+ x = x.to(dtype=input_dtype)
123
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
124
+ return x
125
+
126
+
127
+ class ConvNextEmbeddings(nn.Module):
128
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
129
+ found in src/transformers/models/swin/modeling_swin.py.
130
+ """
131
+
132
+ def __init__(self, config):
133
+ super().__init__()
134
+ self.patch_embeddings = nn.Conv2d(
135
+ config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
136
+ )
137
+ self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
138
+ self.num_channels = config.num_channels
139
+
140
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
141
+ num_channels = pixel_values.shape[1]
142
+ if num_channels != self.num_channels:
143
+ raise ValueError(
144
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
145
+ )
146
+ embeddings = self.patch_embeddings(pixel_values)
147
+ embeddings = self.layernorm(embeddings)
148
+ return embeddings
149
+
150
+
151
+ class ConvNextLayer(nn.Module):
152
+ """This corresponds to the `Block` class in the original implementation.
153
+
154
+ There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,
155
+ H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
156
+
157
+ The authors used (2) as they find it slightly faster in PyTorch.
158
+
159
+ Args:
160
+ config ([`ConvNextConfig`]): Model configuration class.
161
+ dim (`int`): Number of input channels.
162
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
163
+ """
164
+
165
+ def __init__(self, config, dim, drop_path=0):
166
+ super().__init__()
167
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
168
+ self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
169
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
170
+ self.act = ACT2FN[config.hidden_act]
171
+ self.pwconv2 = nn.Linear(4 * dim, dim)
172
+ self.layer_scale_parameter = (
173
+ nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
174
+ if config.layer_scale_init_value > 0
175
+ else None
176
+ )
177
+ self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
178
+
179
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
180
+ input = hidden_states
181
+ x = self.dwconv(hidden_states)
182
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
183
+ x = self.layernorm(x)
184
+ x = self.pwconv1(x)
185
+ x = self.act(x)
186
+ x = self.pwconv2(x)
187
+ if self.layer_scale_parameter is not None:
188
+ x = self.layer_scale_parameter * x
189
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
190
+
191
+ x = input + self.drop_path(x)
192
+ return x
193
+
194
+
195
+ class ConvNextStage(nn.Module):
196
+ """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.
197
+
198
+ Args:
199
+ config ([`ConvNextConfig`]): Model configuration class.
200
+ in_channels (`int`): Number of input channels.
201
+ out_channels (`int`): Number of output channels.
202
+ depth (`int`): Number of residual blocks.
203
+ drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
204
+ """
205
+
206
+ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
207
+ super().__init__()
208
+
209
+ if in_channels != out_channels or stride > 1:
210
+ self.downsampling_layer = nn.Sequential(
211
+ ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
212
+ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
213
+ )
214
+ else:
215
+ self.downsampling_layer = nn.Identity()
216
+ drop_path_rates = drop_path_rates or [0.0] * depth
217
+ self.layers = nn.Sequential(
218
+ *[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
219
+ )
220
+
221
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
222
+ hidden_states = self.downsampling_layer(hidden_states)
223
+ hidden_states = self.layers(hidden_states)
224
+ return hidden_states
225
+
226
+
227
+ class ConvNextEncoder(nn.Module):
228
+ def __init__(self, config):
229
+ super().__init__()
230
+ self.stages = nn.ModuleList()
231
+ drop_path_rates = [
232
+ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
233
+ ]
234
+ prev_chs = config.hidden_sizes[0]
235
+ for i in range(config.num_stages):
236
+ out_chs = config.hidden_sizes[i]
237
+ stage = ConvNextStage(
238
+ config,
239
+ in_channels=prev_chs,
240
+ out_channels=out_chs,
241
+ stride=2 if i > 0 else 1,
242
+ depth=config.depths[i],
243
+ drop_path_rates=drop_path_rates[i],
244
+ )
245
+ self.stages.append(stage)
246
+ prev_chs = out_chs
247
+
248
+ def forward(
249
+ self,
250
+ hidden_states: torch.FloatTensor,
251
+ output_hidden_states: Optional[bool] = False,
252
+ return_dict: Optional[bool] = True,
253
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
254
+ all_hidden_states = () if output_hidden_states else None
255
+
256
+ for i, layer_module in enumerate(self.stages):
257
+ if output_hidden_states:
258
+ all_hidden_states = all_hidden_states + (hidden_states,)
259
+
260
+ hidden_states = layer_module(hidden_states)
261
+
262
+ if output_hidden_states:
263
+ all_hidden_states = all_hidden_states + (hidden_states,)
264
+
265
+ if not return_dict:
266
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
267
+
268
+ return BaseModelOutputWithNoAttention(
269
+ last_hidden_state=hidden_states,
270
+ hidden_states=all_hidden_states,
271
+ )
272
+
273
+
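For reference, the stochastic-depth schedule built in `ConvNextEncoder.__init__` above looks like this for the ConvNeXt-Tiny depths; the `drop_path_rate=0.1` value is assumed for illustration (the config default is 0.0):

```python
import torch

# Spread the drop-path rate linearly over all blocks, then split the schedule per stage,
# mirroring the linspace/split logic used by the encoder above.
depths = [3, 3, 9, 3]
drop_path_rate = 0.1

rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
for i, stage_rates in enumerate(rates):
    print(f"stage {i}: {[round(r, 3) for r in stage_rates]}")
# stage 0 starts at 0.0; the last block of stage 3 reaches drop_path_rate.
```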
274
+ class ConvNextPreTrainedModel(PreTrainedModel):
275
+ """
276
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
277
+ models.
278
+ """
279
+
280
+ config_class = ConvNextConfig
281
+ base_model_prefix = "convnext"
282
+ main_input_name = "pixel_values"
283
+
284
+ def _init_weights(self, module):
285
+ """Initialize the weights"""
286
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
287
+ # Slightly different from the TF version which uses truncated_normal for initialization
288
+ # cf https://github.com/pytorch/pytorch/pull/5617
289
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
290
+ if module.bias is not None:
291
+ module.bias.data.zero_()
292
+ elif isinstance(module, nn.LayerNorm):
293
+ module.bias.data.zero_()
294
+ module.weight.data.fill_(1.0)
295
+
296
+
297
+ CONVNEXT_START_DOCSTRING = r"""
298
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
299
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
300
+ behavior.
301
+
302
+ Parameters:
303
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
304
+ Initializing with a config file does not load the weights associated with the model, only the
305
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
306
+ """
307
+
308
+ CONVNEXT_INPUTS_DOCSTRING = r"""
309
+ Args:
310
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
311
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
312
+ [`ConvNextImageProcessor.__call__`] for details.
313
+
314
+ output_hidden_states (`bool`, *optional*):
315
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
316
+ more detail.
317
+ return_dict (`bool`, *optional*):
318
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
319
+ """
320
+
321
+
322
+ @add_start_docstrings(
323
+ "The bare ConvNext model outputting raw features without any specific head on top.",
324
+ CONVNEXT_START_DOCSTRING,
325
+ )
326
+ class ConvNextModel(ConvNextPreTrainedModel):
327
+ def __init__(self, config):
328
+ super().__init__(config)
329
+ self.config = config
330
+
331
+ self.embeddings = ConvNextEmbeddings(config)
332
+ self.encoder = ConvNextEncoder(config)
333
+
334
+ # final layernorm layer
335
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
336
+
337
+ # Initialize weights and apply final processing
338
+ self.post_init()
339
+
340
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
341
+ @add_code_sample_docstrings(
342
+ checkpoint=_CHECKPOINT_FOR_DOC,
343
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
344
+ config_class=_CONFIG_FOR_DOC,
345
+ modality="vision",
346
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
347
+ )
348
+ def forward(
349
+ self,
350
+ pixel_values: torch.FloatTensor = None,
351
+ output_hidden_states: Optional[bool] = None,
352
+ return_dict: Optional[bool] = None,
353
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
354
+ output_hidden_states = (
355
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
356
+ )
357
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
358
+
359
+ if pixel_values is None:
360
+ raise ValueError("You have to specify pixel_values")
361
+
362
+ embedding_output = self.embeddings(pixel_values)
363
+
364
+ encoder_outputs = self.encoder(
365
+ embedding_output,
366
+ output_hidden_states=output_hidden_states,
367
+ return_dict=return_dict,
368
+ )
369
+
370
+ last_hidden_state = encoder_outputs[0]
371
+
372
+ # global average pooling, (N, C, H, W) -> (N, C)
373
+ pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
374
+
375
+ if not return_dict:
376
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
377
+
378
+ return BaseModelOutputWithPoolingAndNoAttention(
379
+ last_hidden_state=last_hidden_state,
380
+ pooler_output=pooled_output,
381
+ hidden_states=encoder_outputs.hidden_states,
382
+ )
383
+
384
+
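A quick shape check of `ConvNextModel`'s pooling path, a sketch on a randomly initialized model rather than a pretrained checkpoint:

```python
import torch
from transformers import ConvNextConfig, ConvNextModel

# Default config: hidden_sizes=[96, 192, 384, 768], patch_size=4, three stride-2 stages.
config = ConvNextConfig()
model = ConvNextModel(config)

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 768, 7, 7])
print(outputs.pooler_output.shape)      # torch.Size([1, 768]), LayerNorm over the (H, W) mean
```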
385
+ @add_start_docstrings(
386
+ """
387
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
388
+ ImageNet.
389
+ """,
390
+ CONVNEXT_START_DOCSTRING,
391
+ )
392
+ class ConvNextForImageClassification(ConvNextPreTrainedModel):
393
+ def __init__(self, config):
394
+ super().__init__(config)
395
+
396
+ self.num_labels = config.num_labels
397
+ self.convnext = ConvNextModel(config)
398
+
399
+ # Classifier head
400
+ self.classifier = (
401
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
402
+ )
403
+
404
+ # Initialize weights and apply final processing
405
+ self.post_init()
406
+
407
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
408
+ @add_code_sample_docstrings(
409
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
410
+ output_type=ImageClassifierOutputWithNoAttention,
411
+ config_class=_CONFIG_FOR_DOC,
412
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
413
+ )
414
+ def forward(
415
+ self,
416
+ pixel_values: torch.FloatTensor = None,
417
+ labels: Optional[torch.LongTensor] = None,
418
+ output_hidden_states: Optional[bool] = None,
419
+ return_dict: Optional[bool] = None,
420
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
421
+ r"""
422
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
423
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
424
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
425
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
426
+ """
427
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
428
+
429
+ outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
430
+
431
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
432
+
433
+ logits = self.classifier(pooled_output)
434
+
435
+ loss = None
436
+ if labels is not None:
437
+ if self.config.problem_type is None:
438
+ if self.num_labels == 1:
439
+ self.config.problem_type = "regression"
440
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
441
+ self.config.problem_type = "single_label_classification"
442
+ else:
443
+ self.config.problem_type = "multi_label_classification"
444
+
445
+ if self.config.problem_type == "regression":
446
+ loss_fct = MSELoss()
447
+ if self.num_labels == 1:
448
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
449
+ else:
450
+ loss = loss_fct(logits, labels)
451
+ elif self.config.problem_type == "single_label_classification":
452
+ loss_fct = CrossEntropyLoss()
453
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
454
+ elif self.config.problem_type == "multi_label_classification":
455
+ loss_fct = BCEWithLogitsLoss()
456
+ loss = loss_fct(logits, labels)
457
+ if not return_dict:
458
+ output = (logits,) + outputs[2:]
459
+ return ((loss,) + output) if loss is not None else output
460
+
461
+ return ImageClassifierOutputWithNoAttention(
462
+ loss=loss,
463
+ logits=logits,
464
+ hidden_states=outputs.hidden_states,
465
+ )
466
+
467
+
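The loss selection in `ConvNextForImageClassification.forward` above follows the usual `problem_type` convention. A small standalone sketch of that dispatch, with a hypothetical helper name and made-up label tensors:

```python
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    # Illustrative helper, not part of the library: mirrors the branching above.
    if num_labels == 1:
        return "regression"                       # MSELoss
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"      # CrossEntropyLoss
    return "multi_label_classification"           # BCEWithLogitsLoss

print(infer_problem_type(1, torch.tensor([0.7])))                        # regression
print(infer_problem_type(1000, torch.tensor([281])))                     # single_label_classification
print(infer_problem_type(5, torch.tensor([[1.0, 0.0, 1.0, 0.0, 0.0]])))  # multi_label_classification
```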
468
+ @add_start_docstrings(
469
+ """
470
+ ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.
471
+ """,
472
+ CONVNEXT_START_DOCSTRING,
473
+ )
474
+ class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
475
+ def __init__(self, config):
476
+ super().__init__(config)
477
+ super()._init_backbone(config)
478
+
479
+ self.embeddings = ConvNextEmbeddings(config)
480
+ self.encoder = ConvNextEncoder(config)
481
+ self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
482
+
483
+ # Add layer norms to hidden states of out_features
484
+ hidden_states_norms = {}
485
+ for stage, num_channels in zip(self._out_features, self.channels):
486
+ hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first")
487
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
488
+
489
+ # initialize weights and apply final processing
490
+ self.post_init()
491
+
492
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
493
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
494
+ def forward(
495
+ self,
496
+ pixel_values: torch.Tensor,
497
+ output_hidden_states: Optional[bool] = None,
498
+ return_dict: Optional[bool] = None,
499
+ ) -> BackboneOutput:
500
+ """
501
+ Returns:
502
+
503
+ Examples:
504
+
505
+ ```python
506
+ >>> from transformers import AutoImageProcessor, AutoBackbone
507
+ >>> import torch
508
+ >>> from PIL import Image
509
+ >>> import requests
510
+
511
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
512
+ >>> image = Image.open(requests.get(url, stream=True).raw)
513
+
514
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
515
+ >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")
516
+
517
+ >>> inputs = processor(image, return_tensors="pt")
518
+ >>> outputs = model(**inputs)
519
+ ```"""
520
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
521
+ output_hidden_states = (
522
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
523
+ )
524
+
525
+ embedding_output = self.embeddings(pixel_values)
526
+
527
+ outputs = self.encoder(
528
+ embedding_output,
529
+ output_hidden_states=True,
530
+ return_dict=return_dict,
531
+ )
532
+
533
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
534
+
535
+ feature_maps = ()
536
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
537
+ if stage in self.out_features:
538
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
539
+ feature_maps += (hidden_state,)
540
+
541
+ if not return_dict:
542
+ output = (feature_maps,)
543
+ if output_hidden_states:
544
+ output += (hidden_states,)
545
+ return output
546
+
547
+ return BackboneOutput(
548
+ feature_maps=feature_maps,
549
+ hidden_states=hidden_states if output_hidden_states else None,
550
+ attentions=None,
551
+ )
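A sketch of how `ConvNextBackbone` exposes intermediate stages; the `out_features` values are illustrative and the model is randomly initialized, so only the shapes are meaningful:

```python
import torch
from transformers import ConvNextConfig, ConvNextBackbone

# By default only the last stage is returned; out_features selects intermediate stages,
# each normalized by a channels_first ConvNextLayerNorm as in the forward above.
config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
backbone = ConvNextBackbone(config)

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)

for name, fmap in zip(backbone.out_features, outputs.feature_maps):
    print(name, tuple(fmap.shape))
# stage1 (1, 96, 56, 56), stage2 (1, 192, 28, 28), stage3 (1, 384, 14, 14), stage4 (1, 768, 7, 7)
```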
llmeval-env/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py ADDED
@@ -0,0 +1,667 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 ConvNext model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
27
+ from ...modeling_tf_utils import (
28
+ TFModelInputType,
29
+ TFPreTrainedModel,
30
+ TFSequenceClassificationLoss,
31
+ get_initializer,
32
+ keras,
33
+ keras_serializable,
34
+ unpack_inputs,
35
+ )
36
+ from ...tf_utils import shape_list
37
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
38
+ from .configuration_convnext import ConvNextConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ _CONFIG_FOR_DOC = "ConvNextConfig"
45
+ _CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
46
+
47
+
48
+ class TFConvNextDropPath(keras.layers.Layer):
49
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
50
+ References:
51
+ (1) github.com:rwightman/pytorch-image-models
52
+ """
53
+
54
+ def __init__(self, drop_path: float, **kwargs):
55
+ super().__init__(**kwargs)
56
+ self.drop_path = drop_path
57
+
58
+ def call(self, x: tf.Tensor, training=None):
59
+ if training:
60
+ keep_prob = 1 - self.drop_path
61
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
62
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
63
+ random_tensor = tf.floor(random_tensor)
64
+ return (x / keep_prob) * random_tensor
65
+ return x
66
+
67
+
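A small sketch of why `TFConvNextDropPath` rescales by `1 / keep_prob`: the expected activation stays the same as in the undropped case. The 0.2 rate is an arbitrary example value:

```python
import tensorflow as tf

drop_path = 0.2
keep_prob = 1.0 - drop_path

x = tf.ones((10000, 1))                                              # many "samples", value 1.0
# 1 with probability keep_prob, 0 otherwise, matching the floor(keep_prob + U(0, 1)) trick above.
mask = tf.floor(keep_prob + tf.random.uniform(tf.shape(x), 0, 1))
out = (x / keep_prob) * mask

print(float(tf.reduce_mean(out)))   # ~1.0 on average, i.e. the undropped expectation is preserved
```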
68
+ class TFConvNextEmbeddings(keras.layers.Layer):
69
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
70
+ found in src/transformers/models/swin/modeling_swin.py.
71
+ """
72
+
73
+ def __init__(self, config: ConvNextConfig, **kwargs):
74
+ super().__init__(**kwargs)
75
+ self.patch_embeddings = keras.layers.Conv2D(
76
+ filters=config.hidden_sizes[0],
77
+ kernel_size=config.patch_size,
78
+ strides=config.patch_size,
79
+ name="patch_embeddings",
80
+ kernel_initializer=get_initializer(config.initializer_range),
81
+ bias_initializer=keras.initializers.Zeros(),
82
+ )
83
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
84
+ self.num_channels = config.num_channels
85
+ self.config = config
86
+
87
+ def call(self, pixel_values):
88
+ if isinstance(pixel_values, dict):
89
+ pixel_values = pixel_values["pixel_values"]
90
+
91
+ tf.debugging.assert_equal(
92
+ shape_list(pixel_values)[1],
93
+ self.num_channels,
94
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
95
+ )
96
+
97
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
98
+ # So change the input format from `NCHW` to `NHWC`.
99
+ # shape = (batch_size, in_height, in_width, in_channels)
100
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
101
+
102
+ embeddings = self.patch_embeddings(pixel_values)
103
+ embeddings = self.layernorm(embeddings)
104
+ return embeddings
105
+
106
+ def build(self, input_shape=None):
107
+ if self.built:
108
+ return
109
+ self.built = True
110
+ if getattr(self, "patch_embeddings", None) is not None:
111
+ with tf.name_scope(self.patch_embeddings.name):
112
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
113
+ if getattr(self, "layernorm", None) is not None:
114
+ with tf.name_scope(self.layernorm.name):
115
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
116
+
117
+
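A minimal sketch of the NCHW to NHWC round trip mentioned in the comments above; the tensor is random and only the shapes matter:

```python
import tensorflow as tf

# The TF port accepts NCHW at the API boundary (for parity with PyTorch), computes in NHWC
# internally because keras Conv2D on CPU requires it, and transposes back before pooling.
pixel_values = tf.random.normal((1, 3, 224, 224))            # NCHW, as produced by the image processor
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))         # (1, 224, 224, 3) for keras layers
nchw_again = tf.transpose(nhwc, perm=(0, 3, 1, 2))           # (1, 3, 224, 224) for output parity
print(nhwc.shape, nchw_again.shape)
```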
118
+ class TFConvNextLayer(keras.layers.Layer):
119
+ """This corresponds to the `Block` class in the original implementation.
120
+
121
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in
122
+ (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
123
+
124
+ The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
125
+ NHWC ordering, we can just apply the operations straight-away without the permutation.
126
+
127
+ Args:
128
+ config ([`ConvNextConfig`]): Model configuration class.
129
+ dim (`int`): Number of input channels.
130
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
131
+ """
132
+
133
+ def __init__(self, config, dim, drop_path=0.0, **kwargs):
134
+ super().__init__(**kwargs)
135
+ self.dim = dim
136
+ self.config = config
137
+ self.dwconv = keras.layers.Conv2D(
138
+ filters=dim,
139
+ kernel_size=7,
140
+ padding="same",
141
+ groups=dim,
142
+ kernel_initializer=get_initializer(config.initializer_range),
143
+ bias_initializer="zeros",
144
+ name="dwconv",
145
+ ) # depthwise conv
146
+ self.layernorm = keras.layers.LayerNormalization(
147
+ epsilon=1e-6,
148
+ name="layernorm",
149
+ )
150
+ self.pwconv1 = keras.layers.Dense(
151
+ units=4 * dim,
152
+ kernel_initializer=get_initializer(config.initializer_range),
153
+ bias_initializer="zeros",
154
+ name="pwconv1",
155
+ ) # pointwise/1x1 convs, implemented with linear layers
156
+ self.act = get_tf_activation(config.hidden_act)
157
+ self.pwconv2 = keras.layers.Dense(
158
+ units=dim,
159
+ kernel_initializer=get_initializer(config.initializer_range),
160
+ bias_initializer="zeros",
161
+ name="pwconv2",
162
+ )
163
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
164
+ # behaviour.
165
+ self.drop_path = (
166
+ TFConvNextDropPath(drop_path, name="drop_path")
167
+ if drop_path > 0.0
168
+ else keras.layers.Activation("linear", name="drop_path")
169
+ )
170
+
171
+ def build(self, input_shape: tf.TensorShape = None):
172
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
173
+ self.layer_scale_parameter = (
174
+ self.add_weight(
175
+ shape=(self.dim,),
176
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
177
+ trainable=True,
178
+ name="layer_scale_parameter",
179
+ )
180
+ if self.config.layer_scale_init_value > 0
181
+ else None
182
+ )
183
+
184
+ if self.built:
185
+ return
186
+ self.built = True
187
+ if getattr(self, "dwconv", None) is not None:
188
+ with tf.name_scope(self.dwconv.name):
189
+ self.dwconv.build([None, None, None, self.dim])
190
+ if getattr(self, "layernorm", None) is not None:
191
+ with tf.name_scope(self.layernorm.name):
192
+ self.layernorm.build([None, None, None, self.dim])
193
+ if getattr(self, "pwconv1", None) is not None:
194
+ with tf.name_scope(self.pwconv1.name):
195
+ self.pwconv1.build([None, None, self.dim])
196
+ if getattr(self, "pwconv2", None) is not None:
197
+ with tf.name_scope(self.pwconv2.name):
198
+ self.pwconv2.build([None, None, 4 * self.dim])
199
+ if getattr(self, "drop_path", None) is not None:
200
+ with tf.name_scope(self.drop_path.name):
201
+ self.drop_path.build(None)
202
+
203
+ def call(self, hidden_states, training=False):
204
+ input = hidden_states
205
+ x = self.dwconv(hidden_states)
206
+ x = self.layernorm(x)
207
+ x = self.pwconv1(x)
208
+ x = self.act(x)
209
+ x = self.pwconv2(x)
210
+
211
+ if self.layer_scale_parameter is not None:
212
+ x = self.layer_scale_parameter * x
213
+
214
+ x = input + self.drop_path(x, training=training)
215
+ return x
216
+
217
+
218
+ class TFConvNextStage(keras.layers.Layer):
219
+ """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.
220
+
221
+ Args:
222
+ config ([`ConvNextConfig`]):
223
+ Model configuration class.
224
+ in_channels (`int`):
225
+ Number of input channels.
226
+ out_channels (`int`):
227
+ Number of output channels.
228
+ depth (`int`):
229
+ Number of residual blocks.
230
+ drop_path_rates(`List[float]`):
231
+ Stochastic depth rates for each layer.
232
+ """
233
+
234
+ def __init__(
235
+ self,
236
+ config: ConvNextConfig,
237
+ in_channels: int,
238
+ out_channels: int,
239
+ kernel_size: int = 2,
240
+ stride: int = 2,
241
+ depth: int = 2,
242
+ drop_path_rates: Optional[List[float]] = None,
243
+ **kwargs,
244
+ ):
245
+ super().__init__(**kwargs)
246
+ if in_channels != out_channels or stride > 1:
247
+ self.downsampling_layer = [
248
+ keras.layers.LayerNormalization(
249
+ epsilon=1e-6,
250
+ name="downsampling_layer.0",
251
+ ),
252
+ # Inputs to this layer will follow NHWC format since we
253
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
254
+ # layer. All the outputs throughout the model will be in NHWC
255
+ # from this point on until the output where we again change to
256
+ # NCHW.
257
+ keras.layers.Conv2D(
258
+ filters=out_channels,
259
+ kernel_size=kernel_size,
260
+ strides=stride,
261
+ kernel_initializer=get_initializer(config.initializer_range),
262
+ bias_initializer=keras.initializers.Zeros(),
263
+ name="downsampling_layer.1",
264
+ ),
265
+ ]
266
+ else:
267
+ self.downsampling_layer = [tf.identity]
268
+
269
+ drop_path_rates = drop_path_rates or [0.0] * depth
270
+ self.layers = [
271
+ TFConvNextLayer(
272
+ config,
273
+ dim=out_channels,
274
+ drop_path=drop_path_rates[j],
275
+ name=f"layers.{j}",
276
+ )
277
+ for j in range(depth)
278
+ ]
279
+ self.in_channels = in_channels
280
+ self.out_channels = out_channels
281
+ self.stride = stride
282
+
283
+ def call(self, hidden_states):
284
+ for layer in self.downsampling_layer:
285
+ hidden_states = layer(hidden_states)
286
+ for layer in self.layers:
287
+ hidden_states = layer(hidden_states)
288
+ return hidden_states
289
+
290
+ def build(self, input_shape=None):
291
+ if self.built:
292
+ return
293
+ self.built = True
294
+ if getattr(self, "layers", None) is not None:
295
+ for layer in self.layers:
296
+ with tf.name_scope(layer.name):
297
+ layer.build(None)
298
+ if self.in_channels != self.out_channels or self.stride > 1:
299
+ with tf.name_scope(self.downsampling_layer[0].name):
300
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
301
+ with tf.name_scope(self.downsampling_layer[1].name):
302
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
303
+
304
+
305
+ class TFConvNextEncoder(keras.layers.Layer):
306
+ def __init__(self, config, **kwargs):
307
+ super().__init__(**kwargs)
308
+ self.stages = []
309
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
310
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
311
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
312
+ prev_chs = config.hidden_sizes[0]
313
+ for i in range(config.num_stages):
314
+ out_chs = config.hidden_sizes[i]
315
+ stage = TFConvNextStage(
316
+ config,
317
+ in_channels=prev_chs,
318
+ out_channels=out_chs,
319
+ stride=2 if i > 0 else 1,
320
+ depth=config.depths[i],
321
+ drop_path_rates=drop_path_rates[i],
322
+ name=f"stages.{i}",
323
+ )
324
+ self.stages.append(stage)
325
+ prev_chs = out_chs
326
+
327
+ def call(self, hidden_states, output_hidden_states=False, return_dict=True):
328
+ all_hidden_states = () if output_hidden_states else None
329
+
330
+ for i, layer_module in enumerate(self.stages):
331
+ if output_hidden_states:
332
+ all_hidden_states = all_hidden_states + (hidden_states,)
333
+
334
+ hidden_states = layer_module(hidden_states)
335
+
336
+ if output_hidden_states:
337
+ all_hidden_states = all_hidden_states + (hidden_states,)
338
+
339
+ if not return_dict:
340
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
341
+
342
+ return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
343
+
344
+ def build(self, input_shape=None):
345
+ for stage in self.stages:
346
+ with tf.name_scope(stage.name):
347
+ stage.build(None)
348
+
349
+
350
+ @keras_serializable
351
+ class TFConvNextMainLayer(keras.layers.Layer):
352
+ config_class = ConvNextConfig
353
+
354
+ def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
355
+ super().__init__(**kwargs)
356
+
357
+ self.config = config
358
+ self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
359
+ self.encoder = TFConvNextEncoder(config, name="encoder")
360
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
361
+ # We are setting the `data_format` like so because from here on we will revert to the
362
+ # NCHW output format
363
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None
364
+
365
+ @unpack_inputs
366
+ def call(
367
+ self,
368
+ pixel_values: TFModelInputType | None = None,
369
+ output_hidden_states: Optional[bool] = None,
370
+ return_dict: Optional[bool] = None,
371
+ training: bool = False,
372
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
373
+ output_hidden_states = (
374
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
375
+ )
376
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
377
+
378
+ if pixel_values is None:
379
+ raise ValueError("You have to specify pixel_values")
380
+
381
+ embedding_output = self.embeddings(pixel_values, training=training)
382
+
383
+ encoder_outputs = self.encoder(
384
+ embedding_output,
385
+ output_hidden_states=output_hidden_states,
386
+ return_dict=return_dict,
387
+ training=training,
388
+ )
389
+
390
+ last_hidden_state = encoder_outputs[0]
391
+ # Change to NCHW output format to have uniformity in the modules
392
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
393
+ pooled_output = self.layernorm(self.pooler(last_hidden_state))
394
+
395
+ # Change the other hidden state outputs to NCHW as well
396
+ if output_hidden_states:
397
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
398
+
399
+ if not return_dict:
400
+ hidden_states = hidden_states if output_hidden_states else ()
401
+ return (last_hidden_state, pooled_output) + hidden_states
402
+
403
+ return TFBaseModelOutputWithPooling(
404
+ last_hidden_state=last_hidden_state,
405
+ pooler_output=pooled_output,
406
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
407
+ )
408
+
409
+ def build(self, input_shape=None):
410
+ if self.built:
411
+ return
412
+ self.built = True
413
+ if getattr(self, "embeddings", None) is not None:
414
+ with tf.name_scope(self.embeddings.name):
415
+ self.embeddings.build(None)
416
+ if getattr(self, "encoder", None) is not None:
417
+ with tf.name_scope(self.encoder.name):
418
+ self.encoder.build(None)
419
+ if getattr(self, "layernorm", None) is not None:
420
+ with tf.name_scope(self.layernorm.name):
421
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
422
+
423
+
424
+ class TFConvNextPreTrainedModel(TFPreTrainedModel):
425
+ """
426
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
427
+ models.
428
+ """
429
+
430
+ config_class = ConvNextConfig
431
+ base_model_prefix = "convnext"
432
+ main_input_name = "pixel_values"
433
+
434
+
435
+ CONVNEXT_START_DOCSTRING = r"""
436
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
437
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
438
+ etc.)
439
+
440
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
441
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
442
+ behavior.
443
+
444
+ <Tip>
445
+
446
+ TensorFlow models and layers in `transformers` accept two formats as input:
447
+
448
+ - having all inputs as keyword arguments (like PyTorch models), or
449
+ - having all inputs as a list, tuple or dict in the first positional argument.
450
+
451
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
452
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
453
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
454
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
455
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
456
+ positional argument:
457
+
458
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
459
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
460
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
461
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
462
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
463
+
464
+ Note that when creating models and layers with
465
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
466
+ about any of this, as you can just pass inputs like you would to any other Python function!
467
+
468
+ </Tip>
469
+
470
+ Parameters:
471
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
472
+ Initializing with a config file does not load the weights associated with the model, only the
473
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
474
+ """
475
+
476
+ CONVNEXT_INPUTS_DOCSTRING = r"""
477
+ Args:
478
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
479
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
480
+ [`ConvNextImageProcessor.__call__`] for details.
481
+
482
+ output_hidden_states (`bool`, *optional*):
483
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
484
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
485
+ used instead.
486
+ return_dict (`bool`, *optional*):
487
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
488
+ eager mode, in graph mode the value will always be set to True.
489
+ """
490
+
491
+
492
+ @add_start_docstrings(
493
+ "The bare ConvNext model outputting raw features without any specific head on top.",
494
+ CONVNEXT_START_DOCSTRING,
495
+ )
496
+ class TFConvNextModel(TFConvNextPreTrainedModel):
497
+ def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
498
+ super().__init__(config, *inputs, **kwargs)
499
+ self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")
500
+
501
+ @unpack_inputs
502
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
503
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
504
+ def call(
505
+ self,
506
+ pixel_values: TFModelInputType | None = None,
507
+ output_hidden_states: Optional[bool] = None,
508
+ return_dict: Optional[bool] = None,
509
+ training: bool = False,
510
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
511
+ r"""
512
+ Returns:
513
+
514
+ Examples:
515
+
516
+ ```python
517
+ >>> from transformers import AutoImageProcessor, TFConvNextModel
518
+ >>> from PIL import Image
519
+ >>> import requests
520
+
521
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
522
+ >>> image = Image.open(requests.get(url, stream=True).raw)
523
+
524
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
525
+ >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")
526
+
527
+ >>> inputs = image_processor(images=image, return_tensors="tf")
528
+ >>> outputs = model(**inputs)
529
+ >>> last_hidden_states = outputs.last_hidden_state
530
+ ```"""
531
+ output_hidden_states = (
532
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
533
+ )
534
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
535
+
536
+ if pixel_values is None:
537
+ raise ValueError("You have to specify pixel_values")
538
+
539
+ outputs = self.convnext(
540
+ pixel_values=pixel_values,
541
+ output_hidden_states=output_hidden_states,
542
+ return_dict=return_dict,
543
+ training=training,
544
+ )
545
+
546
+ if not return_dict:
547
+ return (outputs[0],) + outputs[1:]
548
+
549
+ return TFBaseModelOutputWithPooling(
550
+ last_hidden_state=outputs.last_hidden_state,
551
+ pooler_output=outputs.pooler_output,
552
+ hidden_states=outputs.hidden_states,
553
+ )
554
+
555
+ def build(self, input_shape=None):
556
+ if self.built:
557
+ return
558
+ self.built = True
559
+ if getattr(self, "convnext", None) is not None:
560
+ with tf.name_scope(self.convnext.name):
561
+ self.convnext.build(None)
562
+
563
+
564
+ @add_start_docstrings(
565
+ """
566
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
567
+ ImageNet.
568
+ """,
569
+ CONVNEXT_START_DOCSTRING,
570
+ )
571
+ class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
572
+ def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
573
+ super().__init__(config, *inputs, **kwargs)
574
+
575
+ self.num_labels = config.num_labels
576
+ self.convnext = TFConvNextMainLayer(config, name="convnext")
577
+
578
+ # Classifier head
579
+ self.classifier = keras.layers.Dense(
580
+ units=config.num_labels,
581
+ kernel_initializer=get_initializer(config.initializer_range),
582
+ bias_initializer="zeros",
583
+ name="classifier",
584
+ )
585
+ self.config = config
586
+
587
+ @unpack_inputs
588
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
589
+ @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
590
+ def call(
591
+ self,
592
+ pixel_values: TFModelInputType | None = None,
593
+ output_hidden_states: Optional[bool] = None,
594
+ return_dict: Optional[bool] = None,
595
+ labels: np.ndarray | tf.Tensor | None = None,
596
+ training: Optional[bool] = False,
597
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
598
+ r"""
599
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
600
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
601
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
602
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
603
+
604
+ Returns:
605
+
606
+ Examples:
607
+
608
+ ```python
609
+ >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification
610
+ >>> import tensorflow as tf
611
+ >>> from PIL import Image
612
+ >>> import requests
613
+
614
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
615
+ >>> image = Image.open(requests.get(url, stream=True).raw)
616
+
617
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
618
+ >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
619
+
620
+ >>> inputs = image_processor(images=image, return_tensors="tf")
621
+ >>> outputs = model(**inputs)
622
+ >>> logits = outputs.logits
623
+ >>> # model predicts one of the 1000 ImageNet classes
624
+ >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
625
+ >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
626
+ ```"""
627
+ output_hidden_states = (
628
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
629
+ )
630
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
631
+
632
+ if pixel_values is None:
633
+ raise ValueError("You have to specify pixel_values")
634
+
635
+ outputs = self.convnext(
636
+ pixel_values,
637
+ output_hidden_states=output_hidden_states,
638
+ return_dict=return_dict,
639
+ training=training,
640
+ )
641
+
642
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
643
+
644
+ logits = self.classifier(pooled_output)
645
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
646
+
647
+ if not return_dict:
648
+ output = (logits,) + outputs[2:]
649
+ return ((loss,) + output) if loss is not None else output
650
+
651
+ return TFSequenceClassifierOutput(
652
+ loss=loss,
653
+ logits=logits,
654
+ hidden_states=outputs.hidden_states,
655
+ )
656
+
657
+ def build(self, input_shape=None):
658
+ if self.built:
659
+ return
660
+ self.built = True
661
+ if getattr(self, "convnext", None) is not None:
662
+ with tf.name_scope(self.convnext.name):
663
+ self.convnext.build(None)
664
+ if getattr(self, "classifier", None) is not None:
665
+ if hasattr(self.classifier, "name"):
666
+ with tf.name_scope(self.classifier.name):
667
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__init__.py ADDED
@@ -0,0 +1,64 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_kosmos2": ["KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Kosmos2Config"],
27
+ "processing_kosmos2": ["Kosmos2Processor"],
28
+ }
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_kosmos2"] = [
37
+ "KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "Kosmos2ForConditionalGeneration",
39
+ "Kosmos2Model",
40
+ "Kosmos2PreTrainedModel",
41
+ ]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_kosmos2 import KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP, Kosmos2Config
46
+ from .processing_kosmos2 import Kosmos2Processor
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .modeling_kosmos2 import (
55
+ KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST,
56
+ Kosmos2ForConditionalGeneration,
57
+ Kosmos2Model,
58
+ Kosmos2PreTrainedModel,
59
+ )
60
+
61
+ else:
62
+ import sys
63
+
64
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.02 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/configuration_kosmos2.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (2.33 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/modeling_kosmos2.cpython-310.pyc ADDED
Binary file (64.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/__pycache__/processing_kosmos2.cpython-310.pyc ADDED
Binary file (21 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/configuration_kosmos2.py ADDED
@@ -0,0 +1,295 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ KOSMOS-2 model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class Kosmos2TextConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a
33
+ KOSMOS-2 text decoder according to the specified arguments, defining the model architecture. Instantiating a
34
+ configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2
35
+ [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 65037):
42
+ Vocabulary size of the Kosmos2 model. Defines the number of different tokens that can be represented by the
43
+ `input_ids` passed when calling [`Kosmos2Model`].
44
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
45
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
46
+ just in case (e.g., 512 or 1024 or 2048).
47
+ embed_dim (`int`, *optional*, defaults to 2048):
48
+ Dimensionality of the layers and the pooler layer.
49
+ layers (`int`, *optional*, defaults to 24):
50
+ Number of hidden layers in the Transformer encoder.
51
+ ffn_dim (`int`, *optional*, defaults to 8192):
52
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
53
+ attention_heads (`int`, *optional*, defaults to 32):
54
+ Number of attention heads for each attention layer in the Transformer encoder.
55
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_dropout (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for the attention probabilities.
62
+ activation_dropout (`float`, *optional*, defaults to 0.0):
63
+ The dropout ratio for activations inside the fully connected layer.
64
+ layerdrop (`float`, *optional*, defaults to 0.0):
65
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
66
+ for more details.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
68
+ The epsilon used by the layer normalization layers.
69
+ init_std (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ scale_embedding (`bool`, *optional*, defaults to `True`):
72
+ Scale embeddings by dividing by sqrt(embed_dim).
73
+ use_cache (`bool`, *optional*, defaults to `True`):
74
+ Whether or not the model should return the last key/values attentions (not used by all models).
75
+ ```"""
76
+
77
+ model_type = "kosmos_2_text_model"
78
+ keys_to_ignore_at_inference = ["past_key_values"]
79
+ attribute_map = {
80
+ "num_attention_heads": "attention_heads",
81
+ "hidden_size": "embed_dim",
82
+ "num_hidden_layers": "layers",
83
+ }
84
+
85
+ def __init__(
86
+ self,
87
+ vocab_size=65037,
88
+ max_position_embeddings=2048,
89
+ embed_dim=2048,
90
+ layers=24,
91
+ ffn_dim=8192,
92
+ attention_heads=32,
93
+ activation_function="gelu",
94
+ dropout=0.1,
95
+ attention_dropout=0.1,
96
+ activation_dropout=0.0,
97
+ layerdrop=0.0,
98
+ layer_norm_eps=1e-5,
99
+ init_std=0.02,
100
+ scale_embedding=True,
101
+ use_cache=True,
102
+ pad_token_id=1,
103
+ bos_token_id=0,
104
+ eos_token_id=2,
105
+ **kwargs,
106
+ ):
107
+ super().__init__(
108
+ pad_token_id=pad_token_id,
109
+ bos_token_id=bos_token_id,
110
+ eos_token_id=eos_token_id,
111
+ **kwargs,
112
+ )
113
+
114
+ self.vocab_size = vocab_size
115
+ self.max_position_embeddings = max_position_embeddings
116
+ self.embed_dim = embed_dim
117
+ self.layers = layers
118
+ self.ffn_dim = ffn_dim
119
+ self.attention_heads = attention_heads
120
+ self.activation_function = activation_function
121
+ self.dropout = dropout
122
+ self.attention_dropout = attention_dropout
123
+ self.activation_dropout = activation_dropout
124
+ self.layerdrop = layerdrop
125
+ self.layer_norm_eps = layer_norm_eps
126
+ self.init_std = init_std
127
+ self.scale_embedding = scale_embedding
128
+ self.use_cache = use_cache
129
+
130
+ @classmethod
131
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
132
+ cls._set_token_in_kwargs(kwargs)
133
+
134
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
135
+
136
+ # get the text config dict if we are loading from Kosmos2Config
137
+ if config_dict.get("model_type") == "kosmos-2":
138
+ config_dict = config_dict["text_config"]
139
+
140
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
141
+ logger.warning(
142
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
143
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
144
+ )
145
+
146
+ return cls.from_dict(config_dict, **kwargs)
147
+
148
+
149
+ class Kosmos2VisionConfig(PretrainedConfig):
150
+ r"""
151
+ This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a
152
+ KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
153
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2
154
+ [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.
155
+
156
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
157
+ documentation from [`PretrainedConfig`] for more information.
158
+
159
+ Args:
160
+ hidden_size (`int`, *optional*, defaults to 1024):
161
+ Dimensionality of the encoder layers and the pooler layer.
162
+ intermediate_size (`int`, *optional*, defaults to 4096):
163
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
164
+ num_hidden_layers (`int`, *optional*, defaults to 24):
165
+ Number of hidden layers in the Transformer encoder.
166
+ num_attention_heads (`int`, *optional*, defaults to 16):
167
+ Number of attention heads for each attention layer in the Transformer encoder.
168
+ num_channels (`int`, *optional*, defaults to 3):
169
+ The number of input channels.
170
+ image_size (`int`, *optional*, defaults to 224):
171
+ The size (resolution) of each image.
172
+ patch_size (`int`, *optional*, defaults to 14):
173
+ The size (resolution) of each patch.
174
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
175
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
176
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
177
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
178
+ The epsilon used by the layer normalization layers.
179
+ attention_dropout (`float`, *optional*, defaults to 0.0):
180
+ The dropout ratio for the attention probabilities.
181
+ initializer_range (`float`, *optional*, defaults to 0.02):
182
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
183
+ initializer_factor (`float`, *optional*, defaults to 1):
184
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
185
+ testing).
186
+ ```"""
187
+
188
+ model_type = "kosmos_2_vision_model"
189
+
190
+ def __init__(
191
+ self,
192
+ hidden_size=1024,
193
+ intermediate_size=4096,
194
+ num_hidden_layers=24,
195
+ num_attention_heads=16,
196
+ num_channels=3,
197
+ image_size=224,
198
+ patch_size=14,
199
+ hidden_act="quick_gelu",
200
+ layer_norm_eps=1e-5,
201
+ attention_dropout=0.0,
202
+ initializer_range=0.02,
203
+ initializer_factor=1.0,
204
+ **kwargs,
205
+ ):
206
+ super().__init__(**kwargs)
207
+
208
+ self.hidden_size = hidden_size
209
+ self.intermediate_size = intermediate_size
210
+ self.num_hidden_layers = num_hidden_layers
211
+ self.num_attention_heads = num_attention_heads
212
+ self.num_channels = num_channels
213
+ self.patch_size = patch_size
214
+ self.image_size = image_size
215
+ self.initializer_range = initializer_range
216
+ self.initializer_factor = initializer_factor
217
+ self.attention_dropout = attention_dropout
218
+ self.layer_norm_eps = layer_norm_eps
219
+ self.hidden_act = hidden_act
220
+
221
+ @classmethod
222
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
223
+ cls._set_token_in_kwargs(kwargs)
224
+
225
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
226
+
227
+ # get the vision config dict if we are loading from Kosmos2Config
228
+ if config_dict.get("model_type") == "kosmos-2":
229
+ config_dict = config_dict["vision_config"]
230
+
231
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
232
+ logger.warning(
233
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
234
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
235
+ )
236
+
237
+ return cls.from_dict(config_dict, **kwargs)
238
+
239
+
240
+ class Kosmos2Config(PretrainedConfig):
241
+ r"""
242
+ This is the configuration class to store the configuration of a [`Kosmos2Model`]. It is used to instantiate a
243
+ KOSMOS-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
244
+ with the defaults will yield a similar configuration to that of the KOSMOS-2
245
+ [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.
246
+
247
+ Args:
248
+ text_config (`dict`, *optional*):
249
+ Dictionary of configuration options used to initialize [`Kosmos2TextConfig`].
250
+ vision_config (`dict`, *optional*):
251
+ Dictionary of configuration options used to initialize [`Kosmos2VisionConfig`].
252
+ latent_query_num (`int`, *optional*, defaults to 64):
253
+ The number of latent query tokens that represent the image features used in the text decoder component.
254
+ kwargs (*optional*):
255
+ Dictionary of keyword arguments.
256
+
257
+ Example:
258
+
259
+ ```python
260
+ >>> from transformers import Kosmos2Config, Kosmos2Model
261
+
262
+ >>> # Initializing a Kosmos-2 kosmos-2-patch14-224 style configuration
263
+ >>> configuration = Kosmos2Config()
264
+
265
+ >>> # Initializing a model (with random weights) from the kosmos-2-patch14-224 style configuration
266
+ >>> model = Kosmos2Model(configuration)
267
+
268
+ >>> # Accessing the model configuration
269
+ >>> configuration = model.config
270
+ ```"""
271
+
272
+ model_type = "kosmos-2"
273
+ is_composition = True
274
+
275
+ def __init__(
276
+ self,
277
+ text_config=None,
278
+ vision_config=None,
279
+ latent_query_num=64,
280
+ **kwargs,
281
+ ):
282
+ super().__init__(**kwargs)
283
+
284
+ if text_config is None:
285
+ text_config = {}
286
+ logger.info("`text_config` is `None`. Initializing the `Kosmos2TextConfig` with default values.")
287
+
288
+ if vision_config is None:
289
+ vision_config = {}
290
+ logger.info("`vision_config` is `None`. Initializing the `Kosmos2VisionConfig` with default values.")
291
+
292
+ self.text_config = Kosmos2TextConfig(**text_config)
293
+ self.vision_config = Kosmos2VisionConfig(**vision_config)
294
+
295
+ self.latent_query_num = latent_query_num
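A short sketch of composing the two sub-configurations above, using the documented default values:

```python
from transformers import Kosmos2Config

# The composite config wraps a text decoder config and a vision encoder config, plus the
# number of latent query tokens that bridge them; values shown are the documented defaults.
config = Kosmos2Config(
    text_config={"layers": 24, "embed_dim": 2048},
    vision_config={"num_hidden_layers": 24, "hidden_size": 1024},
    latent_query_num=64,
)
print(config.text_config.embed_dim, config.vision_config.hidden_size, config.latent_query_num)
# 2048 1024 64
```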
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,77 @@
1
+ import argparse
2
+
3
+ from fairseq.checkpoint_utils import load_checkpoint_to_cpu
4
+
5
+ from transformers import Kosmos2Config, Kosmos2ForConditionalGeneration
6
+
7
+
8
+ KEYS_TO_MODIFY_MAPPING = {
9
+ "gpt_model.decoder.output_projection": "text_model.lm_head",
10
+ "gpt_model.decoder": "text_model.model",
11
+ "img_connector": "image_to_text_projection",
12
+ "img_model.visual.class_embedding": "vision_model.model.embeddings.class_embedding",
13
+ "img_model.visual.positional_embedding": "vision_model.model.embeddings.position_embedding.weight",
14
+ "img_model.visual.conv1": "vision_model.model.embeddings.patch_embedding",
15
+ "img_model.visual": "vision_model.model",
16
+ "ln_pre": "pre_layrnorm",
17
+ "ln_post": "post_layernorm",
18
+ "transformer.resblocks": "encoder.layers",
19
+ "ts_attn": "self_attn",
20
+ "ln_1": "layer_norm1",
21
+ "ln_2": "layer_norm2",
22
+ "c_fc": "fc1",
23
+ "c_proj": "fc2",
24
+ }
25
+
26
+
27
+ KEYS_TO_IGNORE = [
28
+ # this buffer in the original code is only used to send weights to the desired device
29
+ "gpt_model.decoder.embed_positions._float_tensor",
30
+ # this weight is never used in the forward pass in the original KOSMOS-2
31
+ "gpt_model.decoder.self_attn_sope.scale",
32
+ ]
33
+
34
+
35
+ def rename_key(key):
36
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
37
+ if key_to_modify in key:
38
+ key = key.replace(key_to_modify, new_key)
39
+
40
+ return key
41
+
42
+
43
+ def convert_kosmos2_checkpoint_to_pytorch(checkpoint_path, pytorch_dump_folder_path):
44
+ state = load_checkpoint_to_cpu(checkpoint_path)
45
+ state_dict = state["model"]
46
+ state_dict_keys = list(state_dict.keys())
47
+
48
+ config = Kosmos2Config()
49
+ # This is necessary to match the results given by the original demo
50
+ config.text_config.no_repeat_ngram_size = 3
51
+ model = Kosmos2ForConditionalGeneration(config)
52
+
53
+ # convert (by renaming keys)
54
+ converted_state_dict = {}
55
+ for key in state_dict_keys:
56
+ if key in KEYS_TO_IGNORE:
57
+ continue
58
+ renamed_key = rename_key(key)
59
+ converted_state_dict[renamed_key] = state_dict[key]
60
+
61
+ # check weight loading
62
+ model.load_state_dict(converted_state_dict, strict=True)
63
+ # save the result
64
+ model.save_pretrained(pytorch_dump_folder_path)
65
+
66
+
67
+ if __name__ == "__main__":
68
+ parser = argparse.ArgumentParser()
69
+ # Required parameters
70
+ parser.add_argument(
71
+ "--kosmos2_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch checkpoint."
72
+ )
73
+ parser.add_argument(
74
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
75
+ )
76
+ args = parser.parse_args()
77
+ convert_kosmos2_checkpoint_to_pytorch(args.kosmos2_checkpoint_path, args.pytorch_dump_folder_path)
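A hedged usage sketch of the conversion script, not part of the script itself: the paths are placeholders, running the conversion requires the `fairseq` dependency imported above, and `convert_kosmos2_checkpoint_to_pytorch` is assumed to be in scope (defined above or imported from this file).

```python
# Placeholder paths -- substitute a real fairseq checkpoint and an output folder.
# Equivalent CLI call:
#   python convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py \
#       --kosmos2_checkpoint_path kosmos2.pt --pytorch_dump_folder_path ./kosmos2-hf
from transformers import Kosmos2ForConditionalGeneration

convert_kosmos2_checkpoint_to_pytorch("kosmos2.pt", "./kosmos2-hf")

# The dumped folder can then be reloaded with the standard transformers API.
model = Kosmos2ForConditionalGeneration.from_pretrained("./kosmos2-hf")
```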
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/modeling_kosmos2.py ADDED
@@ -0,0 +1,2054 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch KOSMOS-2 model."""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ BaseModelOutputWithPooling,
32
+ CausalLMOutputWithCrossAttentions,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_kosmos2 import Kosmos2Config, Kosmos2TextConfig, Kosmos2VisionConfig
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = Kosmos2Config
48
+
49
+
50
+ from ..deprecated._archive_maps import KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
54
+ """
55
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
56
+ """
57
+ bsz, src_len = mask.size()
58
+ tgt_len = tgt_len if tgt_len is not None else src_len
59
+
60
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
61
+
62
+ inverted_mask = 1.0 - expanded_mask
63
+
64
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
65
+
66
+
67
+ def _make_causal_mask(
68
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
69
+ ):
70
+ """
71
+ Make causal mask used for bi-directional self-attention.
72
+ """
73
+ bsz, tgt_len = input_ids_shape
74
+ mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
75
+ mask_cond = torch.arange(mask.size(-1), device=device)
76
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
77
+ mask = mask.to(dtype)
78
+
79
+ if past_key_values_length > 0:
80
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
81
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
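As a quick illustration (assuming the two helpers above are in scope), both utilities return additive masks of shape `[bsz, 1, tgt_seq_len, src_seq_len]` whose blocked positions hold the minimum value of the chosen dtype:

```python
import torch

# Illustrative batch of 2 sequences of length 4 (1 = keep, 0 = padding).
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])

expanded = _expand_mask(attention_mask, dtype=torch.float32)
causal = _make_causal_mask((2, 4), torch.float32, device=attention_mask.device)

print(expanded.shape)  # torch.Size([2, 1, 4, 4]); padded source positions hold torch.finfo(float32).min
print(causal.shape)    # torch.Size([2, 1, 4, 4]); positions after the current token hold the same min value
```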
82
+
83
+
84
+ # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
85
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
86
+ """
87
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
88
+ are ignored. This is modified from fairseq's `utils.make_positions`.
89
+
90
+ Args:
91
+ input_ids: torch.Tensor
92
+
93
+ Returns: torch.Tensor
94
+ """
95
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
96
+ mask = input_ids.ne(padding_idx).int()
97
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
98
+ return incremental_indices.long() + padding_idx
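A tiny worked example (illustrative values) of the position-id helper above, using `padding_idx=1`:

```python
import torch

input_ids = torch.tensor([[5, 6, 1, 1]])  # two real tokens followed by two padding tokens (padding_idx = 1)
position_ids = create_position_ids_from_input_ids(input_ids, padding_idx=1)
print(position_ids)  # tensor([[2, 3, 1, 1]]) -- real tokens count up from padding_idx + 1, padding keeps padding_idx
```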
99
+
100
+
101
+ KOSMOS2_START_DOCSTRING = r"""
102
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
103
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
104
+ etc.)
105
+
106
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
107
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
108
+ and behavior.
109
+
110
+ Parameters:
111
+ config ([`Kosmos2Config`]): Model configuration class with all the parameters of the model.
112
+ Initializing with a config file does not load the weights associated with the model, only the
113
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
114
+ """
115
+
116
+ KOSMOS2_VISION_INPUTS_DOCSTRING = r"""
117
+ Args:
118
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
119
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
120
+ [`CLIPImageProcessor.__call__`] for details.
121
+ output_attentions (`bool`, *optional*):
122
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
123
+ tensors for more detail.
124
+ output_hidden_states (`bool`, *optional*):
125
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
126
+ more detail.
127
+ return_dict (`bool`, *optional*):
128
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
129
+ """
130
+
131
+ KOSMOS2_TEXT_INPUTS_DOCSTRING = r"""
132
+ Args:
133
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
134
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
135
+ it.
136
+
137
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
138
+ [`PreTrainedTokenizer.__call__`] for details.
139
+
140
+ [What are input IDs?](../glossary#input-ids)
141
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
142
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
143
+
144
+ - 1 for tokens that are **not masked**,
145
+ - 0 for tokens that are **masked**.
146
+
147
+ [What are attention masks?](../glossary#attention-mask)
148
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
149
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
150
+ image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
151
+ Mask to indicate the location in a sequence to insert the image features. Mask values selected in `[0,
152
+ 1]`:
153
+
154
+ - 1 for places where to put the image features,
155
+ - 0 for places that are not for image features (i.e. for text tokens).
156
+
157
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
158
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
159
+ the model is configured as a decoder.
160
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
161
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
162
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
163
+
164
+ - 1 for tokens that are **not masked**,
165
+ - 0 for tokens that are **masked**.
166
+
167
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
168
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
169
+
170
+ - 1 indicates the head is **not masked**,
171
+ - 0 indicates the head is **masked**.
172
+
173
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
174
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
175
+
176
+ - 1 indicates the head is **not masked**,
177
+ - 0 indicates the head is **masked**.
178
+
179
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
180
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
181
+
182
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
183
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
184
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
185
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
186
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
187
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
188
+ model's internal embedding lookup matrix.
189
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
190
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
191
+ config.max_position_embeddings - 1]`.
192
+
193
+ [What are position IDs?](../glossary#position-ids)
194
+ use_cache (`bool`, *optional*):
195
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
196
+ `past_key_values`).
197
+ output_attentions (`bool`, *optional*):
198
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
199
+ tensors for more detail.
200
+ output_hidden_states (`bool`, *optional*):
201
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
202
+ more detail.
203
+ return_dict (`bool`, *optional*):
204
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
205
+ """
206
+
207
+ KOSMOS2_INPUTS_DOCSTRING = r"""
208
+ Args:
209
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
210
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
211
+ [`CLIPImageProcessor.__call__`] for details.
212
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
213
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
214
+ it.
215
+
216
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
217
+ [`PreTrainedTokenizer.__call__`] for details.
218
+
219
+ [What are input IDs?](../glossary#input-ids)
220
+ image_embeds_position_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
221
+ Mask to indicate the location in a sequence to insert the image features. Mask values selected in `[0,
222
+ 1]`:
223
+
224
+ - 1 for places where to put the image features,
225
+ - 0 for places that are not for image features (i.e. for text tokens).
226
+
227
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
228
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
229
+
230
+ - 1 for tokens that are **not masked**,
231
+ - 0 for tokens that are **masked**.
232
+
233
+ [What are attention masks?](../glossary#attention-mask)
234
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
235
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
236
+
237
+ - 1 indicates the head is **not masked**,
238
+ - 0 indicates the head is **masked**.
239
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
240
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
241
+
242
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
243
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
244
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
245
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
246
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
247
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
248
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
249
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
250
+ model's internal embedding lookup matrix.
251
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
252
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
253
+ config.max_position_embeddings - 1]`.
254
+
255
+ [What are position IDs?](../glossary#position-ids)
256
+ use_cache (`bool`, *optional*):
257
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
258
+ `past_key_values`).
259
+ output_attentions (`bool`, *optional*):
260
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
261
+ tensors for more detail.
262
+ output_hidden_states (`bool`, *optional*):
263
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
264
+ more detail.
265
+ return_dict (`bool`, *optional*):
266
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
267
+ """
268
+
269
+
270
+ @dataclass
271
+ class Kosmos2ModelOutput(ModelOutput):
272
+ """
273
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
274
+
275
+ Args:
276
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
277
+ Sequence of hidden-states at the output of the last layer of the model.
278
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
279
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
280
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
281
+
282
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
283
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
284
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
285
+ sequence_length)`.
286
+
287
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
288
+ heads.
289
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
290
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
291
+ projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
292
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
293
+ sequence_length)`.
294
+
295
+ Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
296
+ the weighted average in the self-attention heads.
297
+ vision_model_output (`BaseModelOutputWithPooling`, *optional*):
298
+ The output of the [`Kosmos2VisionModel`].
299
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
300
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
301
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
302
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
303
+ encoder_sequence_length, embed_size_per_head)`.
304
+
305
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
306
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
307
+ input) to speed up sequential decoding.
308
+ """
309
+
310
+ last_hidden_state: torch.FloatTensor = None
311
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
312
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
313
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
314
+ image_embeds: Optional[torch.FloatTensor] = None
315
+ projection_attentions: Optional[Tuple[torch.FloatTensor]] = None
316
+ vision_model_output: BaseModelOutputWithPooling = None
317
+
318
+ def to_tuple(self) -> Tuple[Any]:
319
+ return tuple(
320
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
321
+ for k in self.keys()
322
+ )
323
+
324
+
325
+ @dataclass
326
+ class Kosmos2ForConditionalGenerationModelOutput(ModelOutput):
327
+ """
328
+ Model output class for `Kosmos2ForConditionalGeneration`.
329
+
330
+ Args:
331
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
332
+ Language modeling loss (for next-token prediction).
333
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
334
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
335
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
336
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
337
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
338
+
339
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
340
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
341
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
342
+ sequence_length)`.
343
+
344
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
345
+ heads.
346
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
347
+ Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
348
+ projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
349
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
350
+ sequence_length)`.
351
+
352
+ Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
353
+ the weighted average in the self-attention heads.
354
+ vision_model_output (`BaseModelOutputWithPooling`, *optional*):
355
+ The output of the [`Kosmos2VisionModel`].
356
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
357
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
358
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
359
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
360
+ encoder_sequence_length, embed_size_per_head)`.
361
+
362
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
363
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
364
+ input) to speed up sequential decoding.
365
+ """
366
+
367
+ loss: Optional[torch.FloatTensor] = None
368
+ logits: torch.FloatTensor = None
369
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
370
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
371
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
372
+ image_embeds: Optional[torch.FloatTensor] = None
373
+ projection_attentions: Optional[Tuple[torch.FloatTensor]] = None
374
+ vision_model_output: BaseModelOutputWithPooling = None
375
+
376
+ def to_tuple(self) -> Tuple[Any]:
377
+ return tuple(
378
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
379
+ for k in self.keys()
380
+ )
381
+
382
+
383
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Kosmos2
384
+ class Kosmos2VisionEmbeddings(nn.Module):
385
+ def __init__(self, config: Kosmos2VisionConfig):
386
+ super().__init__()
387
+ self.config = config
388
+ self.embed_dim = config.hidden_size
389
+ self.image_size = config.image_size
390
+ self.patch_size = config.patch_size
391
+
392
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
393
+
394
+ self.patch_embedding = nn.Conv2d(
395
+ in_channels=config.num_channels,
396
+ out_channels=self.embed_dim,
397
+ kernel_size=self.patch_size,
398
+ stride=self.patch_size,
399
+ bias=False,
400
+ )
401
+
402
+ self.num_patches = (self.image_size // self.patch_size) ** 2
403
+ self.num_positions = self.num_patches + 1
404
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
405
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
406
+
407
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
408
+ batch_size = pixel_values.shape[0]
409
+ target_dtype = self.patch_embedding.weight.dtype
410
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
411
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
412
+
413
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
414
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
415
+ embeddings = embeddings + self.position_embedding(self.position_ids)
416
+ return embeddings
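A shape sketch, assuming `Kosmos2VisionEmbeddings` above is in scope and that the default `Kosmos2VisionConfig` matches the checkpoint name `kosmos-2-patch14-224` (224x224 images, 14x14 patches):

```python
import torch
from transformers.models.kosmos2.configuration_kosmos2 import Kosmos2VisionConfig

config = Kosmos2VisionConfig()  # defaults assumed to correspond to kosmos-2-patch14-224
embeddings = Kosmos2VisionEmbeddings(config)
pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)

out = embeddings(pixel_values)
# One class token plus (image_size // patch_size) ** 2 patch tokens, each of size hidden_size.
print(out.shape)
```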
417
+
418
+
419
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->Kosmos2Vision
420
+ class Kosmos2VisionAttention(nn.Module):
421
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
422
+
423
+ def __init__(self, config):
424
+ super().__init__()
425
+ self.config = config
426
+ self.embed_dim = config.hidden_size
427
+ self.num_heads = config.num_attention_heads
428
+ self.head_dim = self.embed_dim // self.num_heads
429
+ if self.head_dim * self.num_heads != self.embed_dim:
430
+ raise ValueError(
431
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
432
+ f" {self.num_heads})."
433
+ )
434
+ self.scale = self.head_dim**-0.5
435
+ self.dropout = config.attention_dropout
436
+
437
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
438
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
439
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
440
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
441
+
442
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
443
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
444
+
445
+ def forward(
446
+ self,
447
+ hidden_states: torch.Tensor,
448
+ attention_mask: Optional[torch.Tensor] = None,
449
+ causal_attention_mask: Optional[torch.Tensor] = None,
450
+ output_attentions: Optional[bool] = False,
451
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
452
+ """Input shape: Batch x Time x Channel"""
453
+
454
+ bsz, tgt_len, embed_dim = hidden_states.size()
455
+
456
+ # get query proj
457
+ query_states = self.q_proj(hidden_states) * self.scale
458
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
459
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
460
+
461
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
462
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
463
+ key_states = key_states.view(*proj_shape)
464
+ value_states = value_states.view(*proj_shape)
465
+
466
+ src_len = key_states.size(1)
467
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
468
+
469
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
470
+ raise ValueError(
471
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
472
+ f" {attn_weights.size()}"
473
+ )
474
+
475
+ # apply the causal_attention_mask first
476
+ if causal_attention_mask is not None:
477
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
478
+ raise ValueError(
479
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
480
+ f" {causal_attention_mask.size()}"
481
+ )
482
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
483
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
484
+
485
+ if attention_mask is not None:
486
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
487
+ raise ValueError(
488
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
489
+ )
490
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
491
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
492
+
493
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
494
+
495
+ if output_attentions:
496
+ # this operation is a bit awkward, but it's required to
497
+ # make sure that attn_weights keeps its gradient.
498
+ # In order to do so, attn_weights have to be reshaped
499
+ # twice and have to be reused in the following
500
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
501
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
502
+ else:
503
+ attn_weights_reshaped = None
504
+
505
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
506
+
507
+ attn_output = torch.bmm(attn_probs, value_states)
508
+
509
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
510
+ raise ValueError(
511
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
512
+ f" {attn_output.size()}"
513
+ )
514
+
515
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
516
+ attn_output = attn_output.transpose(1, 2)
517
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
518
+
519
+ attn_output = self.out_proj(attn_output)
520
+
521
+ return attn_output, attn_weights_reshaped
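A minimal sketch (illustrative sizes only) of the `(bsz * num_heads, seq_len, head_dim)` layout that the reshaping above feeds into `torch.bmm`:

```python
import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 9, 8
query_states = torch.randn(bsz * num_heads, tgt_len, head_dim)
key_states = torch.randn(bsz * num_heads, tgt_len, head_dim)

attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
print(attn_weights.shape)  # torch.Size([8, 9, 9]) == (bsz * num_heads, tgt_len, src_len)
```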
522
+
523
+
524
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Kosmos2Vision
525
+ class Kosmos2VisionMLP(nn.Module):
526
+ def __init__(self, config):
527
+ super().__init__()
528
+ self.config = config
529
+ self.activation_fn = ACT2FN[config.hidden_act]
530
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
531
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
532
+
533
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
534
+ hidden_states = self.fc1(hidden_states)
535
+ hidden_states = self.activation_fn(hidden_states)
536
+ hidden_states = self.fc2(hidden_states)
537
+ return hidden_states
538
+
539
+
540
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Kosmos2Vision
541
+ class Kosmos2VisionEncoderLayer(nn.Module):
542
+ def __init__(self, config: Kosmos2VisionConfig):
543
+ super().__init__()
544
+ self.embed_dim = config.hidden_size
545
+ self.self_attn = Kosmos2VisionAttention(config)
546
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
547
+ self.mlp = Kosmos2VisionMLP(config)
548
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
549
+
550
+ def forward(
551
+ self,
552
+ hidden_states: torch.Tensor,
553
+ attention_mask: torch.Tensor,
554
+ causal_attention_mask: torch.Tensor,
555
+ output_attentions: Optional[bool] = False,
556
+ ) -> Tuple[torch.FloatTensor]:
557
+ """
558
+ Args:
559
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
560
+ attention_mask (`torch.FloatTensor`): attention mask of size
561
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
563
+ output_attentions (`bool`, *optional*):
564
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
565
+ returned tensors for more detail.
566
+ """
567
+ residual = hidden_states
568
+
569
+ hidden_states = self.layer_norm1(hidden_states)
570
+ hidden_states, attn_weights = self.self_attn(
571
+ hidden_states=hidden_states,
572
+ attention_mask=attention_mask,
573
+ causal_attention_mask=causal_attention_mask,
574
+ output_attentions=output_attentions,
575
+ )
576
+ hidden_states = residual + hidden_states
577
+
578
+ residual = hidden_states
579
+ hidden_states = self.layer_norm2(hidden_states)
580
+ hidden_states = self.mlp(hidden_states)
581
+ hidden_states = residual + hidden_states
582
+
583
+ outputs = (hidden_states,)
584
+
585
+ if output_attentions:
586
+ outputs += (attn_weights,)
587
+
588
+ return outputs
589
+
590
+
591
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Kosmos2Vision
592
+ class Kosmos2VisionEncoder(nn.Module):
593
+ """
594
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
595
+ [`Kosmos2VisionEncoderLayer`].
596
+
597
+ Args:
598
+ config: Kosmos2VisionConfig
599
+ """
600
+
601
+ def __init__(self, config: Kosmos2VisionConfig):
602
+ super().__init__()
603
+ self.config = config
604
+ self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
605
+ self.gradient_checkpointing = False
606
+
607
+ def forward(
608
+ self,
609
+ inputs_embeds,
610
+ attention_mask: Optional[torch.Tensor] = None,
611
+ causal_attention_mask: Optional[torch.Tensor] = None,
612
+ output_attentions: Optional[bool] = None,
613
+ output_hidden_states: Optional[bool] = None,
614
+ return_dict: Optional[bool] = None,
615
+ ) -> Union[Tuple, BaseModelOutput]:
616
+ r"""
617
+ Args:
618
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
619
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
620
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
621
+ than the model's internal embedding lookup matrix.
622
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
623
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
624
+
625
+ - 1 for tokens that are **not masked**,
626
+ - 0 for tokens that are **masked**.
627
+
628
+ [What are attention masks?](../glossary#attention-mask)
629
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
630
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
631
+
632
+ - 1 for tokens that are **not masked**,
633
+ - 0 for tokens that are **masked**.
634
+
635
+ [What are attention masks?](../glossary#attention-mask)
636
+ output_attentions (`bool`, *optional*):
637
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
638
+ returned tensors for more detail.
639
+ output_hidden_states (`bool`, *optional*):
640
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
641
+ for more detail.
642
+ return_dict (`bool`, *optional*):
643
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
644
+ """
645
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
646
+ output_hidden_states = (
647
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
648
+ )
649
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
650
+
651
+ encoder_states = () if output_hidden_states else None
652
+ all_attentions = () if output_attentions else None
653
+
654
+ hidden_states = inputs_embeds
655
+ for idx, encoder_layer in enumerate(self.layers):
656
+ if output_hidden_states:
657
+ encoder_states = encoder_states + (hidden_states,)
658
+ if self.gradient_checkpointing and self.training:
659
+ layer_outputs = self._gradient_checkpointing_func(
660
+ encoder_layer.__call__,
661
+ hidden_states,
662
+ attention_mask,
663
+ causal_attention_mask,
664
+ output_attentions,
665
+ )
666
+ else:
667
+ layer_outputs = encoder_layer(
668
+ hidden_states,
669
+ attention_mask,
670
+ causal_attention_mask,
671
+ output_attentions=output_attentions,
672
+ )
673
+
674
+ hidden_states = layer_outputs[0]
675
+
676
+ if output_attentions:
677
+ all_attentions = all_attentions + (layer_outputs[1],)
678
+
679
+ if output_hidden_states:
680
+ encoder_states = encoder_states + (hidden_states,)
681
+
682
+ if not return_dict:
683
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
684
+ return BaseModelOutput(
685
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
686
+ )
687
+
688
+
689
+ # Similar to `transformers.models.clip.modeling_clip.CLIPVisionTransformer` but without docstring for `forward`
690
+ class Kosmos2VisionTransformer(nn.Module):
691
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPVision->Kosmos2Vision,CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2Vision
692
+ def __init__(self, config: Kosmos2VisionConfig):
693
+ super().__init__()
694
+ self.config = config
695
+ embed_dim = config.hidden_size
696
+
697
+ self.embeddings = Kosmos2VisionEmbeddings(config)
698
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
699
+ self.encoder = Kosmos2VisionEncoder(config)
700
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
701
+
702
+ def forward(
703
+ self,
704
+ pixel_values: Optional[torch.FloatTensor] = None,
705
+ output_attentions: Optional[bool] = None,
706
+ output_hidden_states: Optional[bool] = None,
707
+ return_dict: Optional[bool] = None,
708
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
709
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
710
+ output_hidden_states = (
711
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
712
+ )
713
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
714
+
715
+ if pixel_values is None:
716
+ raise ValueError("You have to specify pixel_values")
717
+
718
+ hidden_states = self.embeddings(pixel_values)
719
+ hidden_states = self.pre_layrnorm(hidden_states)
720
+
721
+ encoder_outputs = self.encoder(
722
+ inputs_embeds=hidden_states,
723
+ output_attentions=output_attentions,
724
+ output_hidden_states=output_hidden_states,
725
+ return_dict=return_dict,
726
+ )
727
+
728
+ last_hidden_state = encoder_outputs[0]
729
+ pooled_output = last_hidden_state[:, 0, :]
730
+ pooled_output = self.post_layernorm(pooled_output)
731
+
732
+ if not return_dict:
733
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
734
+
735
+ return BaseModelOutputWithPooling(
736
+ last_hidden_state=last_hidden_state,
737
+ pooler_output=pooled_output,
738
+ hidden_states=encoder_outputs.hidden_states,
739
+ attentions=encoder_outputs.attentions,
740
+ )
741
+
742
+
743
+ # Similar to `transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding` but allowing to pass `position_ids`
744
+ class Kosmos2TextSinusoidalPositionalEmbedding(nn.Module):
745
+ """This module produces sinusoidal positional embeddings of any length."""
746
+
747
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.__init__
748
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
749
+ super().__init__()
750
+ self.offset = 2
751
+ self.embedding_dim = embedding_dim
752
+ self.padding_idx = padding_idx
753
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
754
+
755
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.make_weights
756
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
757
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
758
+ if hasattr(self, "weights"):
759
+ # in forward put the weights on the correct dtype and device of the param
760
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
761
+
762
+ self.register_buffer("weights", emb_weights, persistent=False)
763
+
764
+ @staticmethod
765
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.get_embedding
766
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
767
+ """
768
+ Build sinusoidal embeddings.
769
+
770
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
771
+ "Attention Is All You Need".
772
+ """
773
+ half_dim = embedding_dim // 2
774
+ emb = math.log(10000) / (half_dim - 1)
775
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
776
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
777
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
778
+ if embedding_dim % 2 == 1:
779
+ # zero pad
780
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
781
+ if padding_idx is not None:
782
+ emb[padding_idx, :] = 0
783
+
784
+ return emb.to(torch.get_default_dtype())
785
+
786
+ @torch.no_grad()
787
+ def forward(
788
+ self,
789
+ input_ids: torch.Tensor = None,
790
+ inputs_embeds: torch.Tensor = None,
791
+ past_key_values_length: int = 0,
792
+ position_ids: torch.Tensor = None,
793
+ ):
794
+ if input_ids is not None:
795
+ bsz, seq_len = input_ids.size()
796
+ if position_ids is None:
797
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
798
+ position_ids = create_position_ids_from_input_ids(
799
+ input_ids, self.padding_idx, past_key_values_length
800
+ ).to(input_ids.device)
801
+ else:
802
+ bsz, seq_len = inputs_embeds.size()[:-1]
803
+ if position_ids is None:
804
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
805
+
806
+ # expand embeddings if needed
807
+ max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
808
+ if max_pos > self.weights.size(0):
809
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
810
+
811
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
812
+
813
+ # Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds
814
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
815
+ """
816
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
817
+
818
+ Args:
819
+ inputs_embeds: torch.Tensor
820
+
821
+ Returns: torch.Tensor
822
+ """
823
+ input_shape = inputs_embeds.size()[:-1]
824
+ sequence_length = input_shape[1]
825
+
826
+ position_ids = torch.arange(
827
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
828
+ )
829
+ return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
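A quick check (illustrative sizes, assuming the class above is in scope) of the static table built by `get_embedding`; note the row at `padding_idx` is zeroed out:

```python
table = Kosmos2TextSinusoidalPositionalEmbedding.get_embedding(
    num_embeddings=10, embedding_dim=6, padding_idx=1
)
print(table.shape)           # torch.Size([10, 6])
print(table[1].abs().sum())  # tensor(0.) -- the row at padding_idx is zeroed out
```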
830
+
831
+
832
+ class KosmosTextAttention(nn.Module):
833
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
834
+
835
+ # Similar to transformers.models.bart.modeling_bart.BartAttention.__init__ except an additional `inner_attn_ln`.
836
+ def __init__(
837
+ self,
838
+ config,
839
+ embed_dim: int,
840
+ num_heads: int,
841
+ dropout: float = 0.0,
842
+ is_decoder: bool = False,
843
+ add_inner_attn_layernorm: bool = False,
844
+ bias: bool = True,
845
+ ):
846
+ super().__init__()
847
+ self.embed_dim = embed_dim
848
+ self.num_heads = num_heads
849
+ self.dropout = dropout
850
+ self.head_dim = embed_dim // num_heads
851
+
852
+ if (self.head_dim * num_heads) != self.embed_dim:
853
+ raise ValueError(
854
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
855
+ f" and `num_heads`: {num_heads})."
856
+ )
857
+ self.scaling = self.head_dim**-0.5
858
+ self.is_decoder = is_decoder
859
+
860
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
861
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
862
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
863
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
864
+
865
+ # End copy
866
+ self.inner_attn_ln = None
867
+ if add_inner_attn_layernorm:
868
+ self.inner_attn_ln = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
869
+
870
+ def _shape(self, projection: torch.Tensor) -> torch.Tensor:
871
+ new_projection_shape = projection.size()[:-1] + (self.num_heads, self.head_dim)
872
+ # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D)
873
+ new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3)
874
+ return new_projection
875
+
876
+ def forward(
877
+ self,
878
+ hidden_states: torch.Tensor,
879
+ encoder_hidden_states: Optional[torch.Tensor] = None,
880
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
881
+ attention_mask: Optional[torch.Tensor] = None,
882
+ layer_head_mask: Optional[torch.Tensor] = None,
883
+ output_attentions: bool = False,
884
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
885
+ """Input shape: Batch x Time x Channel"""
886
+
887
+ # if key_value_states are provided this layer is used as a cross-attention layer
888
+ # for the decoder
889
+ is_cross_attention = encoder_hidden_states is not None
890
+ batch_size, seq_length = hidden_states.shape[:2]
891
+
892
+ # use encoder_hidden_states if cross attention
893
+ current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
894
+ # checking that the `sequence_length` of the `past_key_value` is the same as the provided
895
+ # `encoder_hidden_states` to support prefix tuning
896
+ if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]:
897
+ # reuse k,v, cross_attentions
898
+ key_states = past_key_value[0]
899
+ value_states = past_key_value[1]
900
+ else:
901
+ key_states = self._shape(self.k_proj(current_states))
902
+ value_states = self._shape(self.v_proj(current_states))
903
+ if past_key_value is not None and not is_cross_attention:
904
+ # reuse k, v, self_attention
905
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
906
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
907
+
908
+ query_states = self._shape(self.q_proj(hidden_states) * self.scaling)
909
+ attn_weights = torch.matmul(query_states, key_states.transpose(-1, -2))
910
+
911
+ if self.is_decoder:
912
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
913
+ # Further calls to cross_attention layer can then reuse all cross-attention
914
+ # key/value_states (first "if" case)
915
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
916
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
917
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
918
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
919
+ past_key_value = (key_states, value_states)
920
+
921
+ src_len = key_states.size(2)
922
+
923
+ if attention_mask is not None:
924
+ if attention_mask.size() != (batch_size, 1, seq_length, src_len):
925
+ raise ValueError(
926
+ f"Attention mask should be of size {(batch_size, 1, seq_length, src_len)}, but is {attention_mask.size()}"
927
+ )
928
+ attn_weights = attn_weights + attention_mask
929
+
930
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
931
+
932
+ # Mask heads if we want to
933
+ if layer_head_mask is not None:
934
+ attn_weights = attn_weights * layer_head_mask
935
+
936
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
937
+
938
+ # attn_output = torch.bmm(attn_probs, value_states) ?
939
+ context_states = torch.matmul(attn_weights, value_states)
940
+ # attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) ?
941
+ context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
942
+
943
+ if self.inner_attn_ln is not None:
944
+ context_states = self.inner_attn_ln(context_states)
945
+
946
+ attn_output = self.out_proj(context_states)
947
+
948
+ return attn_output, attn_weights, past_key_value
949
+
950
+
951
+ class Kosmos2TextFFN(nn.Module):
952
+ def __init__(self, config: Kosmos2TextConfig):
953
+ super().__init__()
954
+
955
+ self.dropout = config.dropout
956
+ self.activation_fn = ACT2FN[config.activation_function]
957
+ self.activation_dropout = config.activation_dropout
958
+
959
+ self.fc1 = nn.Linear(config.embed_dim, config.ffn_dim)
960
+ self.fc2 = nn.Linear(config.ffn_dim, config.embed_dim)
961
+
962
+ self.ffn_layernorm = nn.LayerNorm(config.ffn_dim, eps=config.layer_norm_eps)
963
+
964
+ def forward(self, hidden_states):
965
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
966
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
967
+ hidden_states = self.ffn_layernorm(hidden_states)
968
+ hidden_states = self.fc2(hidden_states)
969
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
970
+
971
+ return hidden_states
972
+
973
+
974
+ class Kosmos2TextBlock(nn.Module):
975
+ def __init__(self, config: Kosmos2TextConfig):
976
+ super().__init__()
977
+ self.embed_dim = config.embed_dim
978
+
979
+ self.self_attn = KosmosTextAttention(
980
+ config,
981
+ embed_dim=self.embed_dim,
982
+ num_heads=config.attention_heads,
983
+ dropout=config.attention_dropout,
984
+ is_decoder=True,
985
+ add_inner_attn_layernorm=True,
986
+ )
987
+ self.dropout = config.dropout
988
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
989
+
990
+ if config.add_cross_attention:
991
+ self.encoder_attn = KosmosTextAttention(
992
+ config,
993
+ embed_dim=self.embed_dim,
994
+ num_heads=config.attention_heads,
995
+ dropout=config.attention_dropout,
996
+ is_decoder=True,
997
+ add_inner_attn_layernorm=False,
998
+ )
999
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
1000
+
1001
+ self.ffn = Kosmos2TextFFN(config)
1002
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
1003
+
1004
+ def forward(
1005
+ self,
1006
+ hidden_states: torch.Tensor,
1007
+ attention_mask: Optional[torch.Tensor] = None,
1008
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1009
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1010
+ layer_head_mask: Optional[torch.Tensor] = None,
1011
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
1012
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1013
+ output_attentions: Optional[bool] = False,
1014
+ use_cache: Optional[bool] = True,
1015
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1016
+ residual = hidden_states
1017
+
1018
+ # Self Attention
1019
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
1020
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
1021
+
1022
+ hidden_states = self.self_attn_layer_norm(hidden_states)
1023
+
1024
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
1025
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
1026
+ hidden_states=hidden_states,
1027
+ past_key_value=self_attn_past_key_value,
1028
+ attention_mask=attention_mask,
1029
+ layer_head_mask=layer_head_mask,
1030
+ output_attentions=output_attentions,
1031
+ )
1032
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1033
+ hidden_states = residual + hidden_states
1034
+
1035
+ # Cross-Attention Block
1036
+ cross_attn_present_key_value = None
1037
+ cross_attn_weights = None
1038
+ if encoder_hidden_states is not None:
1039
+ if not hasattr(self, "encoder_attn"):
1040
+ raise ValueError(
1041
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
1042
+ " by setting `config.add_cross_attention=True`"
1043
+ )
1044
+
1045
+ residual = hidden_states
1046
+
1047
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
1048
+
1049
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
1050
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
1051
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
1052
+ hidden_states=hidden_states,
1053
+ encoder_hidden_states=encoder_hidden_states,
1054
+ attention_mask=encoder_attention_mask,
1055
+ layer_head_mask=cross_attn_layer_head_mask,
1056
+ past_key_value=cross_attn_past_key_value,
1057
+ output_attentions=output_attentions,
1058
+ )
1059
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1060
+ hidden_states = residual + hidden_states
1061
+
1062
+ # add cross-attn to positions 3,4 of present_key_value tuple
1063
+ present_key_value = present_key_value + cross_attn_present_key_value
1064
+
1065
+ # Fully Connected
1066
+ residual = hidden_states
1067
+
1068
+ hidden_states = self.final_layer_norm(hidden_states)
1069
+
1070
+ # FFN
1071
+ hidden_states = self.ffn(hidden_states)
1072
+ hidden_states = residual + hidden_states
1073
+
1074
+ outputs = (hidden_states,)
1075
+
1076
+ if output_attentions:
1077
+ outputs += (self_attn_weights, cross_attn_weights)
1078
+
1079
+ if use_cache:
1080
+ outputs += (present_key_value,)
1081
+
1082
+ return outputs
1083
+
1084
+
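A small sketch of the per-layer `past_key_value` layout that `Kosmos2TextBlock` consumes and produces: the first two entries are the cached self-attention key/value states, and, when cross-attention is enabled, the last two are the cached cross-attention key/value states. The tensor shapes below are assumptions chosen only to make the example concrete.

```python
import torch

# Assumed shapes: batch=1, num_heads=32, head_dim=64, 10 cached decoder tokens,
# 20 encoder states. Only the tuple layout matters here.
self_k = torch.randn(1, 32, 10, 64)
self_v = torch.randn(1, 32, 10, 64)
cross_k = torch.randn(1, 32, 20, 64)
cross_v = torch.randn(1, 32, 20, 64)

past_key_value = (self_k, self_v, cross_k, cross_v)
self_attn_past = past_key_value[:2]     # handed to `self.self_attn`
cross_attn_past = past_key_value[-2:]   # handed to `self.encoder_attn`
```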
1085
+ class Kosmos2TextTransformer(nn.Module):
1086
+ """
1087
+ Transformer decoder consisting of `config.layers` layers. Each layer is a [`Kosmos2TextBlock`].
1088
+
1089
+ Args:
1090
+ config: Kosmos2TextConfig
1091
+ """
1092
+
1093
+ def __init__(self, config: Kosmos2TextConfig):
1094
+ super().__init__()
1095
+ self.config = config
1096
+ self.dropout = config.dropout
1097
+ self.layerdrop = config.layerdrop
1098
+
1099
+ self.embed_scale = math.sqrt(config.embed_dim) if config.scale_embedding else 1.0
1100
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.embed_dim, padding_idx=config.pad_token_id)
1101
+
1102
+ self.embed_positions = Kosmos2TextSinusoidalPositionalEmbedding(
1103
+ num_positions=config.max_position_embeddings,
1104
+ embedding_dim=config.embed_dim,
1105
+ padding_idx=config.pad_token_id,
1106
+ )
1107
+
1108
+ self.layers = nn.ModuleList([Kosmos2TextBlock(config) for _ in range(config.layers)])
1109
+ self.layer_norm = nn.LayerNorm(config.embed_dim, config.layer_norm_eps)
1110
+
1111
+ self.gradient_checkpointing = False
1112
+
1113
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
1114
+ # create causal mask
1115
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1116
+ combined_attention_mask = None
1117
+ if input_shape[-1] > 1:
1118
+ combined_attention_mask = _make_causal_mask(
1119
+ input_shape,
1120
+ inputs_embeds.dtype,
1121
+ device=inputs_embeds.device,
1122
+ past_key_values_length=past_key_values_length,
1123
+ )
1124
+
1125
+ if attention_mask is not None:
1126
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1127
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
1128
+ inputs_embeds.device
1129
+ )
1130
+ combined_attention_mask = (
1131
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
1132
+ )
1133
+
1134
+ return combined_attention_mask
1135
+
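To make `_prepare_decoder_attention_mask` easier to follow, here is a toy re-creation of the combined mask it returns: an additive causal mask plus an expanded padding mask, both using large negative values for blocked positions. This is a sketch under assumed shapes, not the library's `_make_causal_mask`/`_expand_mask` helpers themselves.

```python
import torch

bsz, tgt_len = 1, 4
min_val = torch.finfo(torch.float32).min

# Causal part: position i may not attend to positions j > i.
causal = torch.full((tgt_len, tgt_len), min_val).triu(1)
causal = causal[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)

# Padding part: the last token of the (toy) sequence is padding.
attention_mask = torch.tensor([[1, 1, 1, 0]])
padding = (1.0 - attention_mask[:, None, None, :].float()) * min_val

combined = causal + padding  # (bsz, 1, tgt_len, tgt_len), added to the attention scores
```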
1136
+ def forward_embedding(
1137
+ self,
1138
+ input_ids,
1139
+ inputs_embeds: torch.Tensor = None,
1140
+ image_embeds: torch.Tensor = None,
1141
+ img_input_mask: torch.Tensor = None,
1142
+ past_key_values_length: int = 0,
1143
+ position_ids: torch.Tensor = None,
1144
+ ):
1145
+ # The argument `inputs_embeds` should be the one without being multiplied by `self.embed_scale`.
1146
+ if inputs_embeds is None:
1147
+ inputs_embeds = self.embed_tokens(input_ids)
1148
+
1149
+ if image_embeds is not None:
1150
+ inputs_embeds[img_input_mask.to(dtype=torch.bool)] = image_embeds.to(inputs_embeds.device).view(
1151
+ -1, image_embeds.size(-1)
1152
+ )
1153
+
1154
+ inputs_embeds = inputs_embeds * self.embed_scale
1155
+
1156
+ # embed positions
1157
+ positions = self.embed_positions(
1158
+ input_ids=input_ids,
1159
+ inputs_embeds=inputs_embeds,
1160
+ past_key_values_length=past_key_values_length,
1161
+ position_ids=position_ids,
1162
+ )
1163
+ positions = positions.to(inputs_embeds.device)
1164
+
1165
+ hidden_states = inputs_embeds + positions
1166
+
1167
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1168
+
1169
+ return hidden_states
1170
+
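The key step in `forward_embedding` above is the boolean scatter that writes the projected image features into the token embedding sequence at the positions flagged by `img_input_mask` (i.e. `image_embeds_position_mask`). A toy illustration with assumed sizes:

```python
import torch

# 1 example, 8 positions, embedding size 4, with 3 image slots at positions 1-3.
inputs_embeds = torch.zeros(1, 8, 4)
img_input_mask = torch.tensor([[0, 1, 1, 1, 0, 0, 0, 0]], dtype=torch.bool)
image_embeds = torch.ones(1, 3, 4)  # projected image features, one row per slot

# Same boolean-mask assignment as in `forward_embedding`.
inputs_embeds[img_input_mask] = image_embeds.view(-1, image_embeds.size(-1))

assert inputs_embeds[0, 1:4].eq(1).all() and inputs_embeds[0, 0].eq(0).all()
```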
1171
+ def forward(
1172
+ self,
1173
+ input_ids: Optional[torch.Tensor] = None,
1174
+ attention_mask: Optional[torch.Tensor] = None,
1175
+ image_embeds: Optional[torch.Tensor] = None,
1176
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1177
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1178
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1179
+ head_mask: Optional[torch.Tensor] = None,
1180
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1181
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1182
+ inputs_embeds: Optional[torch.Tensor] = None,
1183
+ position_ids: Optional[torch.Tensor] = None,
1184
+ use_cache: Optional[bool] = None,
1185
+ output_attentions: Optional[bool] = None,
1186
+ output_hidden_states: Optional[bool] = None,
1187
+ return_dict: Optional[bool] = None,
1188
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
1189
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1190
+ output_hidden_states = (
1191
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1192
+ )
1193
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1194
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1195
+
1196
+ if input_ids is not None and inputs_embeds is not None:
1197
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1198
+ elif input_ids is not None:
1199
+ input_shape = input_ids.shape
1200
+ input_ids = input_ids.view(-1, input_shape[-1])
1201
+ elif inputs_embeds is not None:
1202
+ input_shape = inputs_embeds.size()[:-1]
1203
+ else:
1204
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1205
+
1206
+ # past_key_values_length
1207
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1208
+
1209
+ # We don't need img info. when `past_key_values_length` > 0
1210
+ if past_key_values_length > 0:
1211
+ image_embeds = None
1212
+ image_embeds_position_mask = None
1213
+
1214
+ hidden_states = self.forward_embedding(
1215
+ input_ids=input_ids,
1216
+ inputs_embeds=inputs_embeds,
1217
+ image_embeds=image_embeds,
1218
+ img_input_mask=image_embeds_position_mask,
1219
+ past_key_values_length=past_key_values_length,
1220
+ position_ids=position_ids,
1221
+ )
1222
+
1223
+ attention_mask = self._prepare_decoder_attention_mask(
1224
+ attention_mask, input_shape, hidden_states, past_key_values_length
1225
+ )
1226
+
1227
+ # expand encoder attention mask
1228
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1229
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1230
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
1231
+
1232
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1233
+
1234
+ if self.gradient_checkpointing and self.training:
1235
+ if use_cache:
1236
+ logger.warning_once(
1237
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1238
+ )
1239
+ use_cache = False
1240
+
1241
+ # decoder layers
1242
+ all_hidden_states = () if output_hidden_states else None
1243
+ all_self_attns = () if output_attentions else None
1244
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
1245
+ present_key_value_states = () if use_cache else None
1246
+
1247
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
1248
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
1249
+ if attn_mask is not None:
1250
+ if attn_mask.size()[0] != (len(self.layers)):
1251
+ raise ValueError(
1252
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
1253
+ f" {head_mask.size()[0]}."
1254
+ )
1255
+
1256
+ for idx, decoder_layer in enumerate(self.layers):
1257
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1258
+ if output_hidden_states:
1259
+ all_hidden_states += (hidden_states,)
1260
+ if self.training:
1261
+ dropout_probability = torch.rand([])
1262
+ if dropout_probability < self.layerdrop:
1263
+ continue
1264
+
1265
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1266
+
1267
+ if self.gradient_checkpointing and self.training:
1268
+ layer_outputs = self._gradient_checkpointing_func(
1269
+ decoder_layer.__call__,
1270
+ hidden_states,
1271
+ attention_mask,
1272
+ encoder_hidden_states,
1273
+ encoder_attention_mask,
1274
+ head_mask[idx] if head_mask is not None else None,
1275
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1276
+ None,
1277
+ output_attentions,
1278
+ use_cache,
1279
+ )
1280
+ else:
1281
+ layer_outputs = decoder_layer(
1282
+ hidden_states,
1283
+ attention_mask=attention_mask,
1284
+ encoder_hidden_states=encoder_hidden_states,
1285
+ encoder_attention_mask=encoder_attention_mask,
1286
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1287
+ cross_attn_layer_head_mask=(
1288
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1289
+ ),
1290
+ past_key_value=past_key_value,
1291
+ output_attentions=output_attentions,
1292
+ use_cache=use_cache,
1293
+ )
1294
+ hidden_states = layer_outputs[0]
1295
+
1296
+ if use_cache:
1297
+ present_key_value_states += (layer_outputs[3 if output_attentions else 1],)
1298
+
1299
+ if output_attentions:
1300
+ all_self_attns += (layer_outputs[1],)
1301
+
1302
+ if encoder_hidden_states is not None:
1303
+ all_cross_attentions += (layer_outputs[2],)
1304
+
1305
+ # add final layer norm
1306
+ hidden_states = self.layer_norm(hidden_states)
1307
+
1308
+ # add hidden states from the last decoder layer
1309
+ if output_hidden_states:
1310
+ all_hidden_states += (hidden_states,)
1311
+
1312
+ if not return_dict:
1313
+ return tuple(
1314
+ v
1315
+ for v in [
1316
+ hidden_states,
1317
+ present_key_value_states,
1318
+ all_hidden_states,
1319
+ all_self_attns,
1320
+ all_cross_attentions,
1321
+ ]
1322
+ if v is not None
1323
+ )
1324
+ return BaseModelOutputWithPastAndCrossAttentions(
1325
+ last_hidden_state=hidden_states,
1326
+ past_key_values=present_key_value_states,
1327
+ hidden_states=all_hidden_states,
1328
+ attentions=all_self_attns,
1329
+ cross_attentions=all_cross_attentions,
1330
+ )
1331
+
1332
+
1333
+ class Kosmos2PreTrainedModel(PreTrainedModel):
1334
+ """
1335
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1336
+ models.
1337
+ """
1338
+
1339
+ config_class = Kosmos2Config
1340
+ supports_gradient_checkpointing = True
1341
+ _no_split_modules = ["Kosmos2VisionEncoderLayer", "Kosmos2TextBlock"]
1342
+
1343
+ def _init_weights(self, module):
1344
+ """Initialize the weights"""
1345
+ if isinstance(self, Kosmos2VisionModel):
1346
+ factor = self.config.initializer_factor
1347
+ elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)):
1348
+ factor = self.config.vision_config.initializer_factor
1349
+
1350
+ if isinstance(self, (Kosmos2TextModel, Kosmos2TextForCausalLM)):
1351
+ std = self.config.init_std
1352
+ elif isinstance(self, (Kosmos2Model, Kosmos2ForConditionalGeneration)):
1353
+ std = self.config.text_config.init_std
1354
+
1355
+ if isinstance(module, Kosmos2VisionEmbeddings):
1356
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
1357
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
1358
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
1359
+ elif isinstance(module, Kosmos2VisionAttention):
1360
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1361
+ out_proj_std = (module.embed_dim**-0.5) * factor
1362
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
1363
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
1364
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
1365
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
1366
+ if module.q_proj.bias is not None:
1367
+ module.q_proj.bias.data.zero_()
1368
+ if module.k_proj.bias is not None:
1369
+ module.k_proj.bias.data.zero_()
1370
+ if module.v_proj.bias is not None:
1371
+ module.v_proj.bias.data.zero_()
1372
+ if module.out_proj.bias is not None:
1373
+ module.out_proj.bias.data.zero_()
1374
+ elif isinstance(module, Kosmos2VisionMLP):
1375
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
1376
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
1377
+ nn.init.normal_(module.fc1.weight, std=fc_std)
1378
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
1379
+ if module.fc1.bias is not None:
1380
+ module.fc1.bias.data.zero_()
1381
+ if module.fc2.bias is not None:
1382
+ module.fc2.bias.data.zero_()
1383
+ elif isinstance(module, Kosmos2VisionEncoderLayer):
1384
+ module.layer_norm1.bias.data.zero_()
1385
+ module.layer_norm1.weight.data.fill_(1.0)
1386
+ module.layer_norm2.bias.data.zero_()
1387
+ module.layer_norm2.weight.data.fill_(1.0)
1388
+ elif isinstance(module, Kosmos2VisionTransformer):
1389
+ module.pre_layrnorm.bias.data.zero_()
1390
+ module.pre_layrnorm.weight.data.fill_(1.0)
1391
+ module.post_layernorm.bias.data.zero_()
1392
+ module.post_layernorm.weight.data.fill_(1.0)
1393
+ elif isinstance(module, KosmosTextAttention):
1394
+ nn.init.normal_(module.q_proj.weight, std=std)
1395
+ nn.init.normal_(module.k_proj.weight, std=std)
1396
+ nn.init.normal_(module.v_proj.weight, std=std)
1397
+ nn.init.normal_(module.out_proj.weight, std=std)
1398
+ if module.q_proj.bias is not None:
1399
+ module.q_proj.bias.data.zero_()
1400
+ if module.k_proj.bias is not None:
1401
+ module.k_proj.bias.data.zero_()
1402
+ if module.v_proj.bias is not None:
1403
+ module.v_proj.bias.data.zero_()
1404
+ if module.out_proj.bias is not None:
1405
+ module.out_proj.bias.data.zero_()
1406
+ elif isinstance(module, Kosmos2TextFFN):
1407
+ nn.init.normal_(module.fc1.weight, std=std)
1408
+ nn.init.normal_(module.fc2.weight, std=std)
1409
+ if module.fc1.bias is not None:
1410
+ module.fc1.bias.data.zero_()
1411
+ if module.fc2.bias is not None:
1412
+ module.fc2.bias.data.zero_()
1413
+ elif isinstance(module, Kosmos2TextForCausalLM):
1414
+ nn.init.normal_(module.lm_head.weight, std=std)
1415
+ if module.lm_head.bias is not None:
1416
+ module.lm_head.bias.data.zero_()
1417
+ elif isinstance(module, Kosmos2ImageToTextProjection):
1418
+ nn.init.normal_(module.dense.weight, std=std)
1419
+ if module.dense.bias is not None:
1420
+ module.dense.bias.data.zero_()
1421
+ elif isinstance(module, Kosmos2TextTransformer):
1422
+ module.embed_tokens.weight.data.normal_(mean=0.0, std=std)
1423
+ if module.embed_tokens.padding_idx is not None:
1424
+ module.embed_tokens.weight.data[module.embed_tokens.padding_idx].zero_()
1425
+
1426
+
1427
+ class Kosmos2VisionModel(Kosmos2PreTrainedModel):
1428
+ config_class = Kosmos2VisionConfig
1429
+ main_input_name = "pixel_values"
1430
+
1431
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model
1432
+ def __init__(self, config: Kosmos2VisionConfig):
1433
+ super().__init__(config)
1434
+ self.model = Kosmos2VisionTransformer(config)
1435
+ # Initialize weights and apply final processing
1436
+ self.post_init()
1437
+
1438
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.get_input_embeddings with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model
1439
+ def get_input_embeddings(self) -> nn.Module:
1440
+ return self.model.embeddings.patch_embedding
1441
+
1442
+ @add_start_docstrings_to_model_forward(KOSMOS2_VISION_INPUTS_DOCSTRING)
1443
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Kosmos2VisionConfig)
1444
+ def forward(
1445
+ self,
1446
+ pixel_values: Optional[torch.FloatTensor] = None,
1447
+ output_attentions: Optional[bool] = None,
1448
+ output_hidden_states: Optional[bool] = None,
1449
+ return_dict: Optional[bool] = None,
1450
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1451
+ r"""
1452
+ Returns:
1453
+
1454
+ """
1455
+ return self.model(
1456
+ pixel_values=pixel_values,
1457
+ output_attentions=output_attentions,
1458
+ output_hidden_states=output_hidden_states,
1459
+ return_dict=return_dict,
1460
+ )
1461
+
1462
+
1463
+ class Kosmos2TextModel(Kosmos2PreTrainedModel):
1464
+ config_class = Kosmos2TextConfig
1465
+
1466
+ def __init__(self, config: Kosmos2TextConfig):
1467
+ super().__init__(config)
1468
+ self.model = Kosmos2TextTransformer(config)
1469
+ # Initialize weights and apply final processing
1470
+ self.post_init()
1471
+
1472
+ def get_input_embeddings(self) -> nn.Module:
1473
+ return self.model.embed_tokens
1474
+
1475
+ def set_input_embeddings(self, value):
1476
+ self.model.embed_tokens = value
1477
+
1478
+ @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING)
1479
+ @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=Kosmos2TextConfig)
1480
+ def forward(
1481
+ self,
1482
+ input_ids: Optional[torch.Tensor] = None,
1483
+ attention_mask: Optional[torch.Tensor] = None,
1484
+ image_embeds: Optional[torch.Tensor] = None,
1485
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1486
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1487
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1488
+ head_mask: Optional[torch.Tensor] = None,
1489
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1490
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1491
+ inputs_embeds: Optional[torch.Tensor] = None,
1492
+ position_ids: Optional[torch.Tensor] = None,
1493
+ use_cache: Optional[bool] = None,
1494
+ output_attentions: Optional[bool] = None,
1495
+ output_hidden_states: Optional[bool] = None,
1496
+ return_dict: Optional[bool] = None,
1497
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
1498
+ r"""
1499
+ Returns:
1500
+
1501
+ """
1502
+ return self.model(
1503
+ input_ids=input_ids,
1504
+ attention_mask=attention_mask,
1505
+ image_embeds=image_embeds,
1506
+ image_embeds_position_mask=image_embeds_position_mask,
1507
+ encoder_hidden_states=encoder_hidden_states,
1508
+ encoder_attention_mask=encoder_attention_mask,
1509
+ head_mask=head_mask,
1510
+ cross_attn_head_mask=cross_attn_head_mask,
1511
+ past_key_values=past_key_values,
1512
+ inputs_embeds=inputs_embeds,
1513
+ position_ids=position_ids,
1514
+ use_cache=use_cache,
1515
+ output_attentions=output_attentions,
1516
+ output_hidden_states=output_hidden_states,
1517
+ return_dict=return_dict,
1518
+ )
1519
+
1520
+
1521
+ @add_start_docstrings(
1522
+ """
1523
+ The text model from KOSMOS-2 with a language modeling head on top (linear layer with weights tied to the input
1524
+ embeddings).
1525
+ """,
1526
+ KOSMOS2_START_DOCSTRING,
1527
+ )
1528
+ class Kosmos2TextForCausalLM(Kosmos2PreTrainedModel):
1529
+ config_class = Kosmos2TextConfig
1530
+ _tied_weights_keys = ["lm_head.weight"]
1531
+
1532
+ def __init__(self, config: Kosmos2TextConfig):
1533
+ super().__init__(config)
1534
+
1535
+ self.model = Kosmos2TextTransformer(config)
1536
+ self.lm_head = nn.Linear(in_features=config.embed_dim, out_features=config.vocab_size, bias=False)
1537
+
1538
+ # Initialize weights and apply final processing
1539
+ self.post_init()
1540
+
1541
+ def get_input_embeddings(self) -> nn.Module:
1542
+ return self.model.embed_tokens
1543
+
1544
+ def set_input_embeddings(self, value):
1545
+ self.model.embed_tokens = value
1546
+
1547
+ def get_output_embeddings(self) -> nn.Module:
1548
+ return self.lm_head
1549
+
1550
+ def set_output_embeddings(self, new_embeddings):
1551
+ self.lm_head = new_embeddings
1552
+
1553
+ @add_start_docstrings_to_model_forward(KOSMOS2_TEXT_INPUTS_DOCSTRING)
1554
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=Kosmos2TextConfig)
1555
+ def forward(
1556
+ self,
1557
+ input_ids: Optional[torch.Tensor] = None,
1558
+ attention_mask: Optional[torch.Tensor] = None,
1559
+ image_embeds: Optional[torch.Tensor] = None,
1560
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1561
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1562
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1563
+ head_mask: Optional[torch.Tensor] = None,
1564
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1565
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1566
+ inputs_embeds: Optional[torch.Tensor] = None,
1567
+ position_ids: Optional[torch.Tensor] = None,
1568
+ labels: Optional[torch.LongTensor] = None,
1569
+ use_cache: Optional[bool] = None,
1570
+ output_attentions: Optional[bool] = None,
1571
+ output_hidden_states: Optional[bool] = None,
1572
+ return_dict: Optional[bool] = None,
1573
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1574
+ r"""
1575
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1576
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1577
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1578
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1579
+
1580
+ Returns:
1581
+
1582
+ """
1583
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1584
+
1585
+ if labels is not None:
1586
+ if use_cache:
1587
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1588
+ use_cache = False
1589
+
1590
+ outputs = self.model(
1591
+ input_ids=input_ids,
1592
+ attention_mask=attention_mask,
1593
+ image_embeds=image_embeds,
1594
+ image_embeds_position_mask=image_embeds_position_mask,
1595
+ encoder_hidden_states=encoder_hidden_states,
1596
+ encoder_attention_mask=encoder_attention_mask,
1597
+ head_mask=head_mask,
1598
+ cross_attn_head_mask=cross_attn_head_mask,
1599
+ past_key_values=past_key_values,
1600
+ inputs_embeds=inputs_embeds,
1601
+ position_ids=position_ids,
1602
+ use_cache=use_cache,
1603
+ output_attentions=output_attentions,
1604
+ output_hidden_states=output_hidden_states,
1605
+ return_dict=return_dict,
1606
+ )
1607
+ lm_logits = self.lm_head(outputs[0])
1608
+
1609
+ loss = None
1610
+ if labels is not None:
1611
+ # move labels to correct device to enable model parallelism
1612
+ labels = labels.to(lm_logits.device)
1613
+ # Shift so that tokens < n predict n
1614
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1615
+ shift_labels = labels[..., 1:].contiguous()
1616
+ batch_size, seq_length, vocab_size = shift_logits.shape
1617
+ # Flatten the tokens
1618
+ loss_fct = CrossEntropyLoss()
1619
+ loss = loss_fct(
1620
+ shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
1621
+ )
1622
+
1623
+ if not return_dict:
1624
+ output = (lm_logits,) + outputs[1:]
1625
+ return (loss,) + output if loss is not None else output
1626
+
1627
+ return CausalLMOutputWithCrossAttentions(
1628
+ loss=loss,
1629
+ logits=lm_logits,
1630
+ past_key_values=outputs.past_key_values,
1631
+ hidden_states=outputs.hidden_states,
1632
+ attentions=outputs.attentions,
1633
+ cross_attentions=outputs.cross_attentions,
1634
+ )
1635
+
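The loss above is the standard next-token objective: logits at position `t` are scored against the label at position `t + 1`. A toy version with assumed sizes:

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq, vocab = 1, 5, 11  # illustrative sizes only
lm_logits = torch.randn(batch, seq, vocab)
labels = torch.randint(0, vocab, (batch, seq))

shift_logits = lm_logits[..., :-1, :].contiguous()  # drop the last position
shift_labels = labels[..., 1:].contiguous()         # drop the first label
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab), shift_labels.view(-1))
```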
1636
+ def prepare_inputs_for_generation(
1637
+ self,
1638
+ input_ids,
1639
+ image_embeds=None,
1640
+ image_embeds_position_mask=None,
1641
+ past_key_values=None,
1642
+ attention_mask=None,
1643
+ use_cache=None,
1644
+ **model_kwargs,
1645
+ ):
1646
+ input_shape = input_ids.shape
1647
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1648
+ if attention_mask is None:
1649
+ attention_mask = input_ids.new_ones(input_shape)
1650
+
1651
+ position_ids = None
1652
+
1653
+ # cut input_ids if past_key_values is used
1654
+ if past_key_values is not None:
1655
+ position_ids = create_position_ids_from_input_ids(
1656
+ input_ids,
1657
+ padding_idx=self.config.pad_token_id,
1658
+ past_key_values_length=0,
1659
+ )[:, -1:]
1660
+
1661
+ input_ids = input_ids[:, -1:]
1662
+ # the image info. is already encoded into the past keys/values
1663
+ image_embeds = None
1664
+ image_embeds_position_mask = None
1665
+ elif image_embeds_position_mask is not None:
1666
+ # appending `False` to `image_embeds_position_mask` (because `input_ids` grows during generation)
1667
+ batch_size, seq_len = input_ids.size()
1668
+ mask_len = image_embeds_position_mask.size()[-1]
1669
+ image_embeds_position_mask = torch.cat(
1670
+ (
1671
+ image_embeds_position_mask,
1672
+ torch.zeros(size=(batch_size, seq_len - mask_len), dtype=torch.bool, device=input_ids.device),
1673
+ ),
1674
+ dim=1,
1675
+ )
1676
+
1677
+ return {
1678
+ "input_ids": input_ids,
1679
+ "image_embeds": image_embeds,
1680
+ "image_embeds_position_mask": image_embeds_position_mask,
1681
+ "past_key_values": past_key_values,
1682
+ "attention_mask": attention_mask,
1683
+ "position_ids": position_ids,
1684
+ "use_cache": use_cache,
1685
+ }
1686
+
1687
+ @staticmethod
1688
+ # Copied from transformers.models.umt5.modeling_umt5.UMT5ForConditionalGeneration._reorder_cache
1689
+ def _reorder_cache(past_key_values, beam_idx):
1690
+ reordered_past = ()
1691
+ for layer_past in past_key_values:
1692
+ reordered_past += (
1693
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1694
+ )
1695
+ return reordered_past
1696
+
1697
+
1698
+ class Kosmos2ImageToTextProjection(nn.Module):
1699
+ """The layer that transforms the image model's output to part of the text model's input (namely, image features)"""
1700
+
1701
+ def __init__(self, config: Kosmos2Config):
1702
+ super().__init__()
1703
+ self.dense = nn.Linear(config.vision_config.hidden_size, config.text_config.embed_dim)
1704
+ self.latent_query = nn.Parameter(torch.randn(config.latent_query_num, config.text_config.embed_dim))
1705
+
1706
+ self.x_attn = KosmosTextAttention(
1707
+ config.text_config,
1708
+ config.text_config.embed_dim,
1709
+ config.text_config.attention_heads,
1710
+ dropout=config.text_config.attention_dropout,
1711
+ is_decoder=False,
1712
+ add_inner_attn_layernorm=False,
1713
+ )
1714
+
1715
+ def forward(self, features):
1716
+ hidden_states = self.dense(features)
1717
+
1718
+ # shape = [batch, latent_query_num, h_dim]
1719
+ latent_query = self.latent_query.unsqueeze(0).expand(hidden_states.size(0), -1, -1)
1720
+ key_value_states = torch.cat([hidden_states, latent_query], dim=1)
1721
+
1722
+ hidden_states, attn_weights, _ = self.x_attn(
1723
+ hidden_states=latent_query,
1724
+ encoder_hidden_states=key_value_states,
1725
+ past_key_value=None,
1726
+ attention_mask=None,
1727
+ output_attentions=None,
1728
+ )
1729
+
1730
+ return hidden_states, attn_weights
1731
+
1732
+
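A shape-level sketch of what `Kosmos2ImageToTextProjection` does: `latent_query_num` learned query vectors cross-attend over the concatenation of the projected image features and the queries themselves, so the output always has exactly `latent_query_num` image tokens regardless of how many vision patches went in. The sizes below are assumptions for illustration.

```python
import torch

batch, num_patches, embed_dim, latent_query_num = 1, 257, 2048, 64

image_features = torch.randn(batch, num_patches, embed_dim)   # after `self.dense`
latent_query = torch.randn(latent_query_num, embed_dim)       # learned parameter
latent_query = latent_query.unsqueeze(0).expand(batch, -1, -1)

key_value_states = torch.cat([image_features, latent_query], dim=1)
# `self.x_attn` then attends from `latent_query` (the queries) over `key_value_states`,
# returning a tensor of shape (batch, latent_query_num, embed_dim).
assert key_value_states.shape == (batch, num_patches + latent_query_num, embed_dim)
```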
1733
+ @add_start_docstrings(
1734
+ """
1735
+ KOSMOS-2 Model for generating text and image features. The model consists of a vision encoder and a language model.
1736
+ """,
1737
+ KOSMOS2_START_DOCSTRING,
1738
+ )
1739
+ class Kosmos2Model(Kosmos2PreTrainedModel):
1740
+ config_class = Kosmos2Config
1741
+ main_input_name = "pixel_values"
1742
+
1743
+ def __init__(self, config: Kosmos2Config):
1744
+ super().__init__(config)
1745
+
1746
+ self.text_model = Kosmos2TextModel(config.text_config)
1747
+ self.vision_model = Kosmos2VisionModel(config.vision_config)
1748
+ self.image_to_text_projection = Kosmos2ImageToTextProjection(config)
1749
+
1750
+ # Initialize weights and apply final processing
1751
+ self.post_init()
1752
+
1753
+ def get_input_embeddings(self) -> nn.Module:
1754
+ return self.text_model.model.embed_tokens
1755
+
1756
+ def set_input_embeddings(self, value):
1757
+ self.text_model.model.embed_tokens = value
1758
+
1759
+ @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING)
1760
+ @replace_return_docstrings(output_type=Kosmos2ModelOutput, config_class=_CONFIG_FOR_DOC)
1761
+ def forward(
1762
+ self,
1763
+ pixel_values: Optional[torch.Tensor] = None,
1764
+ input_ids: Optional[torch.Tensor] = None,
1765
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1766
+ attention_mask: Optional[torch.Tensor] = None,
1767
+ head_mask: Optional[torch.Tensor] = None,
1768
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1769
+ image_embeds: Optional[torch.Tensor] = None,
1770
+ inputs_embeds: Optional[torch.Tensor] = None,
1771
+ position_ids: Optional[torch.Tensor] = None,
1772
+ use_cache: Optional[bool] = None,
1773
+ output_attentions: Optional[bool] = None,
1774
+ output_hidden_states: Optional[bool] = None,
1775
+ return_dict: Optional[bool] = None,
1776
+ ) -> Union[Tuple, Kosmos2ModelOutput]:
1777
+ r"""
1778
+ Returns:
1779
+
1780
+ Examples:
1781
+
1782
+ ```python
1783
+ >>> from PIL import Image
1784
+ >>> import requests
1785
+ >>> from transformers import AutoProcessor, Kosmos2Model
1786
+
1787
+ >>> model = Kosmos2Model.from_pretrained("microsoft/kosmos-2-patch14-224")
1788
+ >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
1789
+
1790
+ >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
1791
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1792
+
1793
+ >>> text = (
1794
+ ... "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863>"
1795
+ ... "</object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911>"
1796
+ ... "</object>"
1797
+ ... )
1798
+
1799
+ >>> inputs = processor(text=text, images=image, return_tensors="pt", add_eos_token=True)
1800
+
1801
+ >>> last_hidden_state = model(
1802
+ ... pixel_values=inputs["pixel_values"],
1803
+ ... input_ids=inputs["input_ids"],
1804
+ ... attention_mask=inputs["attention_mask"],
1805
+ ... image_embeds_position_mask=inputs["image_embeds_position_mask"],
1806
+ ... ).last_hidden_state
1807
+ >>> list(last_hidden_state.shape)
1808
+ [1, 91, 2048]
1809
+ ```"""
1810
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1811
+ output_hidden_states = (
1812
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1813
+ )
1814
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1815
+
1816
+ vision_model_output = None
1817
+ projection_attentions = None
1818
+ if image_embeds is None:
1819
+ if pixel_values is None:
1820
+ raise ValueError("You have to specify either `pixel_values` or `image_embeds`.")
1821
+
1822
+ vision_model_output = self.vision_model(
1823
+ pixel_values=pixel_values,
1824
+ output_attentions=output_attentions,
1825
+ output_hidden_states=output_hidden_states,
1826
+ return_dict=return_dict,
1827
+ )
1828
+ # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`.
1829
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0])
1830
+ # normalized features
1831
+ image_embeds = nn.functional.normalize(image_embeds, dim=-1)
1832
+ image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
1833
+
1834
+ outputs = self.text_model(
1835
+ input_ids=input_ids,
1836
+ attention_mask=attention_mask,
1837
+ image_embeds=image_embeds,
1838
+ image_embeds_position_mask=image_embeds_position_mask,
1839
+ head_mask=head_mask,
1840
+ past_key_values=past_key_values,
1841
+ inputs_embeds=inputs_embeds,
1842
+ position_ids=position_ids,
1843
+ use_cache=use_cache,
1844
+ output_attentions=output_attentions,
1845
+ output_hidden_states=output_hidden_states,
1846
+ return_dict=return_dict,
1847
+ )
1848
+
1849
+ if not return_dict:
1850
+ outputs = outputs + (image_embeds, projection_attentions, vision_model_output)
1851
+ return tuple(output for output in outputs if output is not None)
1852
+
1853
+ return Kosmos2ModelOutput(
1854
+ last_hidden_state=outputs.last_hidden_state,
1855
+ past_key_values=outputs.past_key_values,
1856
+ hidden_states=outputs.hidden_states,
1857
+ attentions=outputs.attentions,
1858
+ image_embeds=image_embeds,
1859
+ projection_attentions=projection_attentions,
1860
+ vision_model_output=vision_model_output,
1861
+ )
1862
+
1863
+
1864
+ @add_start_docstrings(
1865
+ """
1866
+ KOSMOS-2 Model for generating text and bounding boxes given an image. The model consists of a vision encoder and a
1867
+ language model.
1868
+ """,
1869
+ KOSMOS2_START_DOCSTRING,
1870
+ )
1871
+ class Kosmos2ForConditionalGeneration(Kosmos2PreTrainedModel):
1872
+ config_class = Kosmos2Config
1873
+ main_input_name = "pixel_values"
1874
+ _tied_weights_keys = ["text_model.lm_head.weight"]
1875
+
1876
+ def __init__(self, config: Kosmos2Config):
1877
+ super().__init__(config)
1878
+
1879
+ self.text_model = Kosmos2TextForCausalLM(config.text_config)
1880
+ self.vision_model = Kosmos2VisionModel(config.vision_config)
1881
+
1882
+ self.image_to_text_projection = Kosmos2ImageToTextProjection(config)
1883
+
1884
+ # Initialize weights and apply final processing
1885
+ self.post_init()
1886
+
1887
+ def get_input_embeddings(self) -> nn.Module:
1888
+ return self.text_model.model.embed_tokens
1889
+
1890
+ def set_input_embeddings(self, value):
1891
+ self.text_model.model.embed_tokens = value
1892
+
1893
+ def get_output_embeddings(self) -> nn.Module:
1894
+ return self.text_model.get_output_embeddings()
1895
+
1896
+ def set_output_embeddings(self, new_embeddings):
1897
+ self.text_model.set_output_embeddings(new_embeddings)
1898
+
1899
+ @add_start_docstrings_to_model_forward(KOSMOS2_INPUTS_DOCSTRING)
1900
+ @replace_return_docstrings(output_type=Kosmos2ForConditionalGenerationModelOutput, config_class=_CONFIG_FOR_DOC)
1901
+ def forward(
1902
+ self,
1903
+ pixel_values: Optional[torch.Tensor] = None,
1904
+ input_ids: Optional[torch.Tensor] = None,
1905
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
1906
+ attention_mask: Optional[torch.Tensor] = None,
1907
+ head_mask: Optional[torch.Tensor] = None,
1908
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1909
+ image_embeds: Optional[torch.Tensor] = None,
1910
+ inputs_embeds: Optional[torch.Tensor] = None,
1911
+ position_ids: Optional[torch.Tensor] = None,
1912
+ labels: Optional[torch.LongTensor] = None,
1913
+ use_cache: Optional[bool] = None,
1914
+ output_attentions: Optional[bool] = None,
1915
+ output_hidden_states: Optional[bool] = None,
1916
+ return_dict: Optional[bool] = None,
1917
+ ) -> Union[Tuple, Kosmos2ForConditionalGenerationModelOutput]:
1918
+ r"""
1919
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1920
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1921
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1922
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1923
+
1924
+ Returns:
1925
+
1926
+ Examples:
1927
+
1928
+ ```python
1929
+ >>> from PIL import Image
1930
+ >>> import requests
1931
+ >>> from transformers import AutoProcessor, Kosmos2ForConditionalGeneration
1932
+
1933
+ >>> model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224")
1934
+ >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
1935
+
1936
+ >>> url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
1937
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1938
+
1939
+ >>> prompt = "<grounding> An image of"
1940
+
1941
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
1942
+
1943
+ >>> generated_ids = model.generate(
1944
+ ... pixel_values=inputs["pixel_values"],
1945
+ ... input_ids=inputs["input_ids"],
1946
+ ... attention_mask=inputs["attention_mask"],
1947
+ ... image_embeds=None,
1948
+ ... image_embeds_position_mask=inputs["image_embeds_position_mask"],
1949
+ ... use_cache=True,
1950
+ ... max_new_tokens=64,
1951
+ ... )
1952
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1953
+ >>> processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)
1954
+ >>> processed_text
1955
+ '<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>.'
1956
+
1957
+ >>> caption, entities = processor.post_process_generation(generated_text)
1958
+ >>> caption
1959
+ 'An image of a snowman warming himself by a fire.'
1960
+
1961
+ >>> entities
1962
+ [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])]
1963
+ ```"""
1964
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1965
+ output_hidden_states = (
1966
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1967
+ )
1968
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1969
+
1970
+ vision_model_output = None
1971
+ projection_attentions = None
1972
+ if image_embeds is None:
1973
+ if pixel_values is None:
1974
+ raise ValueError("You have to specify either `pixel_values` or `image_embeds`.")
1975
+
1976
+ vision_model_output = self.vision_model(
1977
+ pixel_values=pixel_values,
1978
+ output_attentions=output_attentions,
1979
+ output_hidden_states=output_hidden_states,
1980
+ return_dict=return_dict,
1981
+ )
1982
+ # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`.
1983
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0])
1984
+ # normalized features
1985
+ image_embeds = nn.functional.normalize(image_embeds, dim=-1)
1986
+ image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
1987
+
1988
+ lm_outputs = self.text_model(
1989
+ input_ids=input_ids,
1990
+ attention_mask=attention_mask,
1991
+ image_embeds=image_embeds,
1992
+ image_embeds_position_mask=image_embeds_position_mask,
1993
+ head_mask=head_mask,
1994
+ past_key_values=past_key_values,
1995
+ inputs_embeds=inputs_embeds,
1996
+ position_ids=position_ids,
1997
+ labels=labels,
1998
+ use_cache=use_cache,
1999
+ output_attentions=output_attentions,
2000
+ output_hidden_states=output_hidden_states,
2001
+ return_dict=return_dict,
2002
+ )
2003
+
2004
+ if not return_dict:
2005
+ outputs = lm_outputs + (image_embeds, projection_attentions, vision_model_output)
2006
+ return tuple(output for output in outputs if output is not None)
2007
+
2008
+ return Kosmos2ForConditionalGenerationModelOutput(
2009
+ loss=lm_outputs.loss,
2010
+ logits=lm_outputs.logits,
2011
+ past_key_values=lm_outputs.past_key_values,
2012
+ hidden_states=lm_outputs.hidden_states,
2013
+ attentions=lm_outputs.attentions,
2014
+ image_embeds=image_embeds,
2015
+ projection_attentions=projection_attentions,
2016
+ vision_model_output=vision_model_output,
2017
+ )
2018
+
2019
+ def generate(
2020
+ self,
2021
+ pixel_values: Optional[torch.Tensor] = None,
2022
+ image_embeds_position_mask: Optional[torch.Tensor] = None,
2023
+ input_ids: Optional[torch.Tensor] = None,
2024
+ attention_mask: Optional[torch.Tensor] = None,
2025
+ image_embeds: Optional[torch.Tensor] = None,
2026
+ **kwargs,
2027
+ ):
2028
+ # in order to allow `inputs` argument (as in `GenerationMixin`)
2029
+ inputs = kwargs.pop("inputs", None)
2030
+ if pixel_values is not None and inputs is not None:
2031
+ raise ValueError(
2032
+ f"`inputs`: {inputs} were passed alongside `pixel_values` which is not allowed."
2033
+ f"Make sure to either pass `inputs` or pixel_values=..."
2034
+ )
2035
+ if pixel_values is None and inputs is not None:
2036
+ pixel_values = inputs
2037
+
2038
+ if image_embeds is None:
2039
+ vision_model_output = self.vision_model(pixel_values)
2040
+ # The whole `last_hidden_state` through `post_layernorm` instead of just `pooled_output`.
2041
+ image_embeds = self.vision_model.model.post_layernorm(vision_model_output[0])
2042
+ # normalized features
2043
+ image_embeds = nn.functional.normalize(image_embeds, dim=-1)
2044
+ image_embeds, projection_attentions = self.image_to_text_projection(image_embeds)
2045
+
2046
+ output = self.text_model.generate(
2047
+ input_ids=input_ids,
2048
+ attention_mask=attention_mask,
2049
+ image_embeds=image_embeds,
2050
+ image_embeds_position_mask=image_embeds_position_mask,
2051
+ **kwargs,
2052
+ )
2053
+
2054
+ return output
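Because `generate` only runs the vision tower when `image_embeds` is `None`, the projected image features can be computed once and reused across several prompts. A hedged sketch that mirrors the steps inside `generate`; the checkpoint name, URL, and prompt are taken from the docstrings above.

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, Kosmos2ForConditionalGeneration

model = Kosmos2ForConditionalGeneration.from_pretrained("microsoft/kosmos-2-patch14-224")
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")

url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text="<grounding> An image of", images=image, return_tensors="pt")

with torch.no_grad():
    # Same steps as inside `generate` when `image_embeds` is None.
    vision_out = model.vision_model(pixel_values=inputs["pixel_values"])
    image_embeds = model.vision_model.model.post_layernorm(vision_out[0])
    image_embeds = torch.nn.functional.normalize(image_embeds, dim=-1)
    image_embeds, _ = model.image_to_text_projection(image_embeds)

generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    image_embeds=image_embeds,  # vision tower is skipped because this is provided
    image_embeds_position_mask=inputs["image_embeds_position_mask"],
    max_new_tokens=64,
)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```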
llmeval-env/lib/python3.10/site-packages/transformers/models/kosmos2/processing_kosmos2.py ADDED
@@ -0,0 +1,666 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Processor class for KOSMOS-2."""
16
+
17
+ import copy
18
+ import math
19
+ import re
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ from ...image_processing_utils import BatchFeature
23
+ from ...image_utils import ImageInput, is_batched
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils import AddedToken
26
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
27
+ from ...utils import TensorType
28
+
29
+
30
+ BboxInput = Union[
31
+ List[Tuple[int, int]],
32
+ List[Tuple[float, float, float, float]],
33
+ List[List[Tuple[int, int]]],
34
+ List[List[Tuple[float, float, float]]],
35
+ ]
36
+
37
+
38
+ class Kosmos2Processor(ProcessorMixin):
39
+ r"""
40
+ Constructs a KOSMOS-2 processor which wraps a KOSMOS-2 image processor and a KOSMOS-2 tokenizer into a single
41
+ processor.
42
+
43
+ [`Kosmos2Processor`] offers all the functionalities of [`CLIPImageProcessor`] and some functionalities of
44
+ [`XLMRobertaTokenizerFast`]. See the docstring of [`~Kosmos2Processor.__call__`] and [`~Kosmos2Processor.decode`]
45
+ for more information.
46
+
47
+ Args:
48
+ image_processor (`CLIPImageProcessor`):
49
+ An instance of [`CLIPImageProcessor`]. The image processor is a required input.
50
+ tokenizer (`XLMRobertaTokenizerFast`):
51
+ An instance of [`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
52
+ num_patch_index_tokens (`int`, *optional*, defaults to 1024):
53
+ The number of tokens that represent patch indices.
54
+ """
55
+
56
+ attributes = ["image_processor", "tokenizer"]
57
+ image_processor_class = "CLIPImageProcessor"
58
+ tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
59
+
60
+ def __init__(self, image_processor, tokenizer, num_patch_index_tokens=1024):
61
+ tokenizer.return_token_type_ids = False
62
+
63
+ self.eod_token = "</doc>"
64
+
65
+ self.boi_token = "<image>"
66
+ self.eoi_token = "</image>"
67
+
68
+ self.eoc_token = "</chunk>"
69
+ self.eol_token = "</line>"
70
+
71
+ self.bop_token = "<phrase>"
72
+ self.eop_token = "</phrase>"
73
+
74
+ self.boo_token = "<object>"
75
+ self.eoo_token = "</object>"
76
+
77
+ self.dom_token = "</delimiter_of_multi_objects/>"
78
+
79
+ self.grd_token = "<grounding>"
80
+
81
+ self.tag_tokens = [
82
+ self.eod_token,
83
+ self.boi_token,
84
+ self.eoi_token,
85
+ self.eoc_token,
86
+ self.eol_token,
87
+ self.bop_token,
88
+ self.eop_token,
89
+ self.boo_token,
90
+ self.eoo_token,
91
+ self.dom_token,
92
+ self.grd_token,
93
+ ]
94
+
95
+ self.num_patch_index_tokens = num_patch_index_tokens
96
+ patch_index_tokens = [f"<patch_index_{str(x).zfill(4)}>" for x in range(self.num_patch_index_tokens)]
97
+
98
+ tokens_to_add = []
99
+ for token in self.tag_tokens + patch_index_tokens:
100
+ tokens_to_add.append(AddedToken(token, lstrip=True, rstrip=False, normalized=False))
101
+ tokenizer.add_tokens(tokens_to_add)
102
+
103
+ super().__init__(image_processor, tokenizer)
104
+
105
+ def __call__(
106
+ self,
107
+ images: ImageInput = None,
108
+ text: Union[TextInput, List[TextInput]] = None,
109
+ bboxes: BboxInput = None,
110
+ num_image_tokens: Optional[int] = 64,
111
+ first_image_token_id: Optional[int] = None,
112
+ add_special_tokens: bool = True,
113
+ add_eos_token: bool = False,
114
+ padding: Union[bool, str, PaddingStrategy] = False,
115
+ truncation: Union[bool, str, TruncationStrategy] = None,
116
+ max_length: Optional[int] = None,
117
+ pad_to_multiple_of: Optional[int] = None,
118
+ return_attention_mask: Optional[bool] = None,
119
+ return_length: bool = False,
120
+ verbose: bool = True,
121
+ return_tensors: Optional[Union[str, TensorType]] = None,
122
+ **kwargs,
123
+ ) -> BatchFeature:
124
+ """
125
+ This method uses [`CLIPImageProcessor.__call__`] to prepare image(s) for the model, and
126
+ [`XLMRobertaTokenizerFast.__call__`] to prepare text for the model.
127
+
128
+ Please refer to the docstring of the above two methods for more information.
129
+
130
+ The rest of this documentation shows the arguments specific to `Kosmos2Processor`.
131
+
132
+ Args:
133
+ bboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*):
134
+ The bounding boxes associated with `texts`.
135
+ num_image_tokens (`int`, defaults to 64):
136
+ The number of (consecutive) positions used as placeholders to store the image information.
137
+ This should be the same as `latent_query_num` in the instance of `Kosmos2Config` you are using.
138
+ first_image_token_id (`int`, *optional*):
139
+ The token id that will be used for the first place of the subsequence that is reserved to store image
140
+ information. If unset, will default to `self.tokenizer.unk_token_id + 1`.
141
+ add_eos_token (`bool`, defaults to `False`):
142
+ Whether or not to include `EOS` token id in the encoding when `add_special_tokens=True`.
143
+ """
144
+ if images is None and text is None:
145
+ raise ValueError("You have to specify either images or text.")
146
+
147
+ encoding = BatchFeature()
148
+
149
+ if images is not None:
150
+ image_encoding = self.image_processor(images, return_tensors=return_tensors)
151
+ encoding.update(image_encoding)
152
+
153
+ if text is not None:
154
+ text = self.preprocess_examples(text, images, bboxes, num_image_tokens=num_image_tokens)
155
+
156
+ if add_special_tokens and not add_eos_token:
157
+ if isinstance(text, str):
158
+ text = f"{self.tokenizer.bos_token}{text}"
159
+ elif isinstance(text, list):
160
+ text = [f"{self.tokenizer.bos_token}{s}" for s in text]
161
+
162
+ text_encoding = self.tokenizer(
163
+ text=text,
164
+ add_special_tokens=(add_special_tokens and add_eos_token),
165
+ padding=padding and images is None,
166
+ truncation=truncation,
167
+ max_length=max_length,
168
+ pad_to_multiple_of=pad_to_multiple_of if images is None else pad_to_multiple_of,
169
+ return_attention_mask=return_attention_mask,
170
+ verbose=verbose,
171
+ return_tensors=return_tensors if images is None else None,
172
+ **kwargs,
173
+ )
174
+ encoding.update(text_encoding)
175
+
176
+ if text is not None and images is not None:
177
+ # Use the id of the first token after <unk>
178
+ if first_image_token_id is None:
179
+ first_image_token_id = self.tokenizer.unk_token_id + 1
180
+
181
+ # To see if we need one more `0` (for `<s>`) at the beginning of `image_embeds_position_mask`.
182
+ with_bos = add_special_tokens
183
+
184
+ # The first (actual) `<image>` token is always at the 1st or 2nd place (after `<s>` if any). Here we look
185
+ # for the second `<image>` token (which indicate the first image token).
186
+ start_index = int(with_bos) + 1
187
+
188
+ # Add `image_embeds_position_mask`: the leading and trailing `0` are for `boi` and `eoi` tokens. The `1` indicates
189
+ # the places of image tokens.
190
+ image_token_ids = list(range(first_image_token_id, first_image_token_id + num_image_tokens))
191
+ base_image_embeds_position_mask = [0] + [1] * num_image_tokens + [0]
192
+
193
+ # loop over `encoding["input_ids"]`
194
+ input_ids = []
195
+ image_embeds_position_mask = []
196
+ all_input_ids = encoding["input_ids"]
197
+ # not batched -> (changed to) batch of size 1
198
+ if isinstance(text, str):
199
+ all_input_ids = [all_input_ids]
200
+ encoding["attention_mask"] = [encoding["attention_mask"]]
201
+ for text_ids in all_input_ids:
202
+ # change the ids for the fake `<image>` tokens in `input_ids`
203
+ text_ids = text_ids[:start_index] + image_token_ids + text_ids[start_index + num_image_tokens :]
204
+ input_ids.append(text_ids)
205
+
206
+ mask = copy.copy(base_image_embeds_position_mask)
207
+ if with_bos:
208
+ # for `<s>`
209
+ mask = [0] + mask
210
+ # trailing part (which are not related to the image)
211
+ mask += [0] * (len(text_ids) - len(mask))
212
+ image_embeds_position_mask.append(mask)
213
+
214
+ if isinstance(text, list):
215
+ sorted_length = sorted(
216
+ [(idx, len(x)) for idx, x in enumerate(text_encoding.input_ids)], key=lambda x: x[-1]
217
+ )
218
+ _, min_len_not_padded = sorted_length[0]
219
+ idx, _ = sorted_length[-1]
220
+
221
+ text_encoding = self.tokenizer(
222
+ text=[text[idx]],
223
+ add_special_tokens=(add_special_tokens and add_eos_token),
224
+ padding=padding,
225
+ truncation=truncation,
226
+ max_length=max_length,
227
+ pad_to_multiple_of=pad_to_multiple_of,
228
+ verbose=verbose,
229
+ return_tensors=None,
230
+ **kwargs,
231
+ )
232
+ max_len_padded = len(text_encoding.input_ids[0])
233
+
234
+ if min_len_not_padded != max_len_padded:
235
+ if self.tokenizer.padding_side == "right":
236
+ input_ids = [x + [self.tokenizer.pad_token_id] * (max_len_padded - len(x)) for x in input_ids]
237
+ image_embeds_position_mask = [
238
+ x + [0] * (max_len_padded - len(x)) for x in image_embeds_position_mask
239
+ ]
240
+ encoding["attention_mask"] = [
241
+ x + [0] * (max_len_padded - len(x)) for x in encoding["attention_mask"]
242
+ ]
243
+ elif self.tokenizer.padding_side == "left":
244
+ input_ids = [[self.tokenizer.pad_token_id] * (max_len_padded - len(x)) + x for x in input_ids]
245
+ image_embeds_position_mask = [
246
+ [0] * (max_len_padded - len(x)) + x for x in image_embeds_position_mask
247
+ ]
248
+ encoding["attention_mask"] = [
249
+ [0] * (max_len_padded - len(x)) + x for x in encoding["attention_mask"]
250
+ ]
251
+
252
+ # un-batch if necessary
253
+ if isinstance(text, str) and return_tensors is None:
254
+ input_ids = input_ids[0]
255
+ encoding["attention_mask"] = encoding["attention_mask"][0]
256
+ image_embeds_position_mask = image_embeds_position_mask[0]
257
+
258
+ # update (with the target tensor type if specified)
259
+ encoding.update(
260
+ BatchEncoding(
261
+ data={
262
+ "input_ids": input_ids,
263
+ "attention_mask": encoding["attention_mask"],
264
+ "image_embeds_position_mask": image_embeds_position_mask,
265
+ },
266
+ tensor_type=return_tensors,
267
+ )
268
+ )
269
+
270
+ return encoding
271
+
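To make the mask construction above concrete, this is the layout `image_embeds_position_mask` ends up with for a single example, shown with `num_image_tokens=4` instead of the default 64 purely for brevity: a `0` for `<s>` when `add_special_tokens=True`, a `0` for the opening `<image>` token, ones for the image slots, a `0` for `</image>`, then zeros for the remaining text tokens.

```python
num_image_tokens = 4   # the default is 64; shortened here for readability
num_text_tokens = 5    # assumed number of trailing text tokens

# [<s>] [<image>] [slot]*num_image_tokens [</image>] [text]*num_text_tokens
mask = [0] + [0] + [1] * num_image_tokens + [0] + [0] * num_text_tokens
assert sum(mask) == num_image_tokens
```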
272
+ def _check_bboxes_for_single_text(self, bboxes):
273
+ """
274
+ Check `bboxes` for a single text example. It could be
275
+ - `None`: no bounding box associated to a text.
276
+ - A list with each element being the bounding boxes associated to one `<phrase> ... </phrase>` pair found
277
+ in a text. This could be:
278
+ - `None`: no bounding box associated to a `<phrase> ... </phrase>` pair.
279
+ - A tuple of 2 integers: A single bounding box specified by patch indices.
280
+ - A tuple of 4 floating point numbers: A single bounding box specified by (normalized) coordinates.
281
+ - A list containing the above 2 tuple types: Multiple bounding boxes for a
282
+ `<phrase> ... </phrase>` pair.
283
+ """
284
+ if bboxes is None:
285
+ return
286
+ elif not isinstance(bboxes, list):
287
+ raise ValueError("`bboxes` (for a single text example) should be `None` or a list.")
288
+
289
+ # `bbox` is the bounding boxes for a single <phrase> </phrase> pair
290
+ for bbox in bboxes:
291
+ if bbox is None:
292
+ continue
293
+ elif not isinstance(bbox, list):
294
+ bbox = [bbox]
295
+ for element in bbox:
296
+ if not isinstance(element, tuple) or not (
297
+ (len(element) == 2 and all(isinstance(x, int) for x in element))
298
+ or (len(element) == 4 and all(isinstance(x, float) for x in element))
299
+ ):
300
+ raise ValueError(
301
+ "Each element in `bboxes` (for a single text example) should be either `None`, a tuple containing "
302
+ "2 integers or 4 float point numbers, or a list containing such tuples. Also "
303
+ "make sure the arguments `texts` and `bboxes` passed to `preprocess_text` are both in "
304
+ "batches or both for a single example."
305
+ )
306
+
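A few values that pass the check above for a single text example; the patch indices and coordinates are taken from the snowman example used in the modeling docstrings and are illustrative only. Each entry of the outer list below is itself a valid `bboxes` argument:

```python
valid_bboxes_arguments = [
    None,                                               # no boxes at all
    [(44, 863)],                                        # one <phrase>: one box as 2 patch indices
    [[(44, 863), (5, 911)]],                            # one <phrase>: two candidate boxes
    [(0.390625, 0.046875, 0.984375, 0.828125), None],   # coords for phrase 1, nothing for phrase 2
]
```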
307
+ def _preprocess_single_example(self, text, image, bboxes, img_info_tokens):
308
+ text = text.strip()
309
+ if image is not None:
310
+ # Add `<image> ... (fake) image tokens ... </image>`
311
+ text = f"{img_info_tokens} {text}"
312
+
313
+ # Add `<object> <patch_idx_xxxx> <patch_idx_yyy> </object>` after `<phrase> phrase text </phrase>`
314
+ text = self._insert_patch_index_tokens(text, bboxes)
315
+ return text
316
+
317
+ def preprocess_examples(
318
+ self,
319
+ texts: Union[TextInput, List[TextInput]],
320
+ images: ImageInput = None,
321
+ bboxes: BboxInput = None,
322
+ num_image_tokens: Optional[int] = 64,
323
+ ) -> Union[str, List[str]]:
324
+ """Add image and bounding box information to `texts` as image and patch index tokens.
325
+
326
+ Args:
327
+ texts (`Union[TextInput, List[TextInput]]`): The texts to be processed.
328
+ images (`ImageInput`, *optional*): The images associated to `texts`.
329
+ bboxes (`Union[List[Tuple[int]], List[Tuple[float]], List[List[Tuple[int]]], List[List[Tuple[float]]]]`, *optional*):
330
+ The bounding boxes associated with `texts`.
331
+ num_image_tokens (`int`, *optional*, defaults to 64):
332
+ The number of image tokens (used as latent queries). This should correspond to the `latent_query_num`
333
+ attribute in `Kosmos2Config`.
334
+
335
+ Returns:
336
+ `Union[TextInput, List[TextInput]]`: The processed texts with image and patch index tokens.
337
+ """
338
+ # These are fake `<image>` tokens enclosed between (the actual) `<image>` token and `</image>`.
339
+ img_tokens = [self.boi_token] * num_image_tokens
340
+ img_info_tokens = " ".join([self.boi_token] + img_tokens + [self.eoi_token])
341
+
342
+ # make batch to simplify processing logic
343
+ batched = True
344
+ if isinstance(texts, str):
345
+ batched = False
346
+ texts = [texts]
347
+
348
+ if images is None:
349
+ images = [None] * len(texts)
350
+ elif not is_batched(images):
351
+ images = [images]
352
+ if len(texts) != len(images):
353
+ raise ValueError(
354
+ f"The number of examples in `texts` and `images` should be the same. Got {len(texts)} v.s. {len(images)} instead."
355
+ )
356
+
357
+ if not batched:
358
+ self._check_bboxes_for_single_text(bboxes)
359
+ bboxes = [bboxes]
360
+ elif bboxes is not None:
361
+ if not isinstance(bboxes, list):
362
+ raise ValueError("`bboxes` should be `None` or a list (as a batch) when `texts` is passed as a batch.")
363
+ for x in bboxes:
364
+ self._check_bboxes_for_single_text(x)
365
+ else:
366
+ bboxes = [None] * len(texts)
367
+
368
+ if len(bboxes) != len(texts):
369
+ raise ValueError(
370
+ f"The number of examples in `texts` and `bboxes` should be the same. Got {len(texts)} v.s. {len(bboxes)} instead."
371
+ )
372
+
373
+ result = [
374
+ self._preprocess_single_example(text, image, bbox, img_info_tokens)
375
+ for text, image, bbox in zip(texts, images, bboxes)
376
+ ]
377
+ # un-batch if necessary
378
+ if not batched:
379
+ result = result[0]
380
+
381
+ return result
382
+
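A minimal usage sketch of `preprocess_examples`. The checkpoint name below is an assumption (any Kosmos-2 checkpoint shipping this processor should behave the same); the boxes are given as normalized coordinates, so each `<phrase> ... </phrase>` pair gets an `<object> <patch_index_....> <patch_index_....> </object>` block appended.

```python
from transformers import AutoProcessor

# Hypothetical checkpoint name (assumption); any Kosmos-2 checkpoint with this processor works.
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")

text = "<grounding> An image of<phrase> a snowman</phrase> warming himself by<phrase> a fire</phrase>."
# One entry per <phrase> ... </phrase> pair, given as normalized (x1, y1, x2, y2) boxes.
bboxes = [(0.390625, 0.046875, 0.984375, 0.828125), (0.171875, 0.015625, 0.484375, 0.890625)]

processed = processor.preprocess_examples(text, bboxes=bboxes)
# Each phrase is now followed by "<object> <patch_index_0044> <patch_index_0863> </object>"
# (and "<object> <patch_index_0005> <patch_index_0911> </object>" for the fire).
```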
383
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
384
+ def batch_decode(self, *args, **kwargs):
385
+ """
386
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
387
+ refer to the docstring of this method for more information.
388
+ """
389
+ return self.tokenizer.batch_decode(*args, **kwargs)
390
+
391
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
392
+ def decode(self, *args, **kwargs):
393
+ """
394
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
395
+ the docstring of this method for more information.
396
+ """
397
+ return self.tokenizer.decode(*args, **kwargs)
398
+
399
+ def post_process_generation(self, text, cleanup_and_extract=True):
400
+ caption = text.split(self.eoi_token)[-1]
401
+ if cleanup_and_extract:
402
+ return clean_text_and_extract_entities_with_bboxes(caption)
403
+ return caption
404
+
405
+ @property
406
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
407
+ def model_input_names(self):
408
+ tokenizer_input_names = self.tokenizer.model_input_names
409
+ image_processor_input_names = self.image_processor.model_input_names
410
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
411
+
412
+ def _insert_patch_index_tokens(self, text: str, bboxes: Union[List[Tuple[int]], List[Tuple[float]]]) -> str:
413
+ if bboxes is None or len(bboxes) == 0:
414
+ return text
415
+
416
+ matched_phrases = list(re.finditer(r"<phrase>.+?</phrase>", string=text))
417
+ if len(matched_phrases) != len(bboxes):
418
+ raise ValueError(
419
+ f"The number of elements in `bboxes` should be the same as the number of `<phrase> ... </phrase>` pairs in `text`. Got {len(matched_phrases)} v.s. {len(bboxes)} instead."
420
+ )
421
+
422
+ # insert object's patch index tokens
423
+ # after the found `<phrase> ... </phrase>` pairs.
424
+ curr_pos = 0
425
+ buffer = []
426
+ for matched, bbox in zip(matched_phrases, bboxes):
427
+ _, end = matched.span()
428
+ buffer.append(text[curr_pos:end])
429
+ curr_pos = end
430
+ # A phrase without bbox
431
+ if bbox is None:
432
+ continue
433
+ # A phrase with a single bbox
434
+ if isinstance(bbox, tuple):
435
+ bbox = [bbox]
436
+ patch_index_strings = []
437
+ # A phrase could have multiple bboxes
438
+ if not all(box is not None for box in bbox):
439
+ raise ValueError(
440
+ "The multiple bounding boxes for a single phrase should not contain any `None` value."
441
+ )
442
+ for box in bbox:
443
+ patch_index_1, patch_index_2 = self._convert_bbox_to_patch_index_tokens(box)
444
+ patch_index_strings.append(f"{patch_index_1} {patch_index_2}")
445
+ # `bbox` being an empty list
446
+ if len(patch_index_strings) == 0:
447
+ continue
448
+ position_str = " </delimiter_of_multi_objects/> ".join(patch_index_strings)
449
+ buffer.append(f"<object> {position_str} </object>")
450
+ # remaining
451
+ if curr_pos < len(text):
452
+ buffer.append(text[curr_pos:])
453
+
454
+ text = "".join(buffer)
455
+ return text
456
+
457
+ def _convert_bbox_to_patch_index_tokens(
458
+ self, bbox: Union[Tuple[int, int], Tuple[float, float, float, float]]
459
+ ) -> Tuple[str, str]:
460
+ # already computed patch indices
461
+ if len(bbox) == 2:
462
+ idx_1, idx_2 = bbox
463
+ # bbox specified with (normalized) coordinates
464
+ else:
465
+ # use `self.tokenizer` to get `num_patches_per_side`
466
+ num_patches_per_side = int(math.sqrt(self.num_patch_index_tokens))
467
+ idx_1, idx_2 = coordinate_to_patch_index(bbox, num_patches_per_side)
468
+
469
+ token_1 = f"<patch_index_{str(idx_1).zfill(4)}>"
470
+ token_2 = f"<patch_index_{str(idx_2).zfill(4)}>"
471
+
472
+ return token_1, token_2
473
+
474
+
475
+ def coordinate_to_patch_index(bbox: Tuple[float, float, float, float], num_patches_per_side: int) -> Tuple[int, int]:
476
+ """Convert a bounding box to a pair of patch indices.
477
+
478
+ Args:
479
+ bbox (`Tuple[float, float, float, float]`):
480
+ The 4 coordinates of the bounding box, with the format being (x1, y1, x2, y2) specifying the upper-left and
481
+ lower-right corners of the box. It should have x2 > x1 and y2 > y1.
482
+ num_patches_per_side (`int`): the number of patches along each side.
483
+
484
+ Returns:
485
+ `Tuple[int, int]`: A pair of patch indices representing the upper-left patch and lower-right patch.
486
+ """
487
+ (x1, y1, x2, y2) = bbox
488
+
489
+ if not (x2 > x1 and y2 > y1):
490
+ raise ValueError("The coordinates in `bbox` should be `(x1, y1, x2, y2)` with `x2 > x1` and `y2 > y1`.")
491
+
492
+ ul_x = math.floor(x1 * num_patches_per_side)
493
+ ul_y = math.floor(y1 * num_patches_per_side)
494
+
495
+ lr_x = math.ceil(x2 * num_patches_per_side - 1)
496
+ lr_y = math.ceil(y2 * num_patches_per_side - 1)
497
+
498
+ ul_idx = ul_y * num_patches_per_side + ul_x
499
+ lr_idx = lr_y * num_patches_per_side + lr_x
500
+
501
+ return ul_idx, lr_idx
502
+
503
+
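A quick sanity check of `coordinate_to_patch_index` (runnable as-is, since the function is module-level): on a 32x32 grid, the snowman box used in the examples elsewhere in this file maps to the same indices that appear in the `<patch_index_0044>`/`<patch_index_0863>` tokens.

```python
from transformers.models.kosmos2.processing_kosmos2 import coordinate_to_patch_index

coordinate_to_patch_index((0.390625, 0.046875, 0.984375, 0.828125), num_patches_per_side=32)
# -> (44, 863): upper-left patch at (row 1, col 12), lower-right patch at (row 26, col 31)
```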
504
+ # copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L35C1-L75C38
505
+ # (with format modifications)
506
+ def patch_index_to_coordinate(ul_idx: int, lr_idx: int, num_patches_per_side: int):
507
+ """
508
+ Given a grid of length `num_patches_per_side` and the indices of the upper-left and lower-right corners of a
509
+ bounding box, returns the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
510
+
511
+ Args:
512
+ ul_idx (`int`): the index of the grid cell that corresponds to the upper-left corner of the bounding box.
513
+ lr_idx (`int`): the index of the grid cell that corresponds to the lower-right corner of the bounding box.
514
+ num_patches_per_side (`int`): the number of patches along each side.
515
+
516
+ Returns:
517
+ `Tuple[float]`: the normalized coordinates of the bounding box, in the form (x1, y1, x2, y2).
518
+ """
519
+ # Compute the size of each cell in the grid
520
+ cell_size = 1.0 / num_patches_per_side
521
+
522
+ # Compute the x and y indices of the upper-left and lower-right corners of the bounding box
523
+ ul_x = ul_idx % num_patches_per_side
524
+ ul_y = ul_idx // num_patches_per_side
525
+
526
+ lr_x = lr_idx % num_patches_per_side
527
+ lr_y = lr_idx // num_patches_per_side
528
+
529
+ # Compute the normalized coordinates of the bounding box
530
+ if ul_idx == lr_idx:
531
+ x1 = ul_x * cell_size
532
+ y1 = ul_y * cell_size
533
+ x2 = lr_x * cell_size + cell_size
534
+ y2 = lr_y * cell_size + cell_size
535
+ elif ul_x == lr_x or ul_y == lr_y:
536
+ x1 = ul_x * cell_size
537
+ y1 = ul_y * cell_size
538
+ x2 = lr_x * cell_size + cell_size
539
+ y2 = lr_y * cell_size + cell_size
540
+ else:
541
+ x1 = ul_x * cell_size + cell_size / 2
542
+ y1 = ul_y * cell_size + cell_size / 2
543
+ x2 = lr_x * cell_size + cell_size / 2
544
+ y2 = lr_y * cell_size + cell_size / 2
545
+
546
+ return x1, y1, x2, y2
547
+
548
+
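And the inverse direction: feeding the same pair of indices back through `patch_index_to_coordinate` recovers the cell-center-based normalized box, matching the coordinates shown in the `clean_text_and_extract_entities_with_bboxes` example further down.

```python
from transformers.models.kosmos2.processing_kosmos2 import patch_index_to_coordinate

patch_index_to_coordinate(44, 863, num_patches_per_side=32)
# -> (0.390625, 0.046875, 0.984375, 0.828125)
```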
549
+ # copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L4-L33
550
+ # (with format modifications)
551
+ def extract_entities_with_patch_indices(text):
552
+ """Extract entities contained in `text`. The bounding bboxes is given in the form of patch indices.
553
+
554
+ This function is only intended to be used within `clean_text_and_extract_entities_with_bboxes` where further
555
+ processing happens, including converting to normalized coordinates and cleaning up whitespace characters.
556
+
557
+ Examples:
558
+
559
+ ```python
560
+ >>> text = "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
561
+ >>> entities = extract_entities_with_patch_indices(text)
562
+ >>> entities
563
+ [(' a snowman', (31, 41), [(44, 863)]), (' a fire', (130, 137), [(5, 911)])]
564
+ ```"""
565
+ # The regular expression pattern for matching the required formats
566
+ pattern = r"(?:(<phrase>([^<]+)</phrase>))?<object>((?:<patch_index_\d+><patch_index_\d+></delimiter_of_multi_objects/>)*<patch_index_\d+><patch_index_\d+>)</object>"
567
+
568
+ # Find all matches in the given string
569
+ matches = re.finditer(pattern, text)
570
+
571
+ # Initialize an empty list to store the valid patch_index combinations
572
+ entities_with_patch_indices = []
573
+
574
+ for match in matches:
575
+ # span of a `phrase` that is between <phrase> and </phrase>
576
+ span = match.span(2)
577
+ phrase_tag, phrase, match_content = match.groups()
578
+ if not phrase_tag:
579
+ phrase = None
580
+ # We take the starting position of `<object>`
581
+ span = (match.span(0)[0], match.span(0)[0])
582
+
583
+ # Split the match_content by the delimiter to get individual patch_index pairs
584
+ patch_index_pairs = match_content.split("</delimiter_of_multi_objects/>")
585
+
586
+ entity_bboxes = []
587
+ for pair in patch_index_pairs:
588
+ # Extract the xxxx and yyyy values from the patch_index pair
589
+ x = re.search(r"<patch_index_(\d+)>", pair)
590
+ y = re.search(r"<patch_index_(\d+)>", pair[1:])
591
+
592
+ if x and y:
593
+ if phrase:
594
+ entity_bboxes.append((int(x.group(1)), int(y.group(1))))
595
+ else:
596
+ entity_bboxes.append((int(x.group(1)), int(y.group(1))))
597
+
598
+ if phrase:
599
+ entities_with_patch_indices.append((phrase, span, entity_bboxes))
600
+ else:
601
+ for bbox in entity_bboxes:
602
+ # fake entity name
603
+ entity = f"<patch_index_{bbox[0]}><patch_index_{bbox[1]}>"
604
+ entities_with_patch_indices.append((entity, span, [bbox]))
605
+
606
+ return entities_with_patch_indices
607
+
608
+
609
+ def adjust_entity_positions(entity, text):
610
+ """Adjust the positions of the entities in `text` to be relative to the text with special fields removed."""
611
+ entity_name, (start, end) = entity
612
+ # compute the length of the string with special fields (tag tokens, patch index tokens, etc.) removed
613
+ adjusted_start = len(re.sub("<.*?>", "", text[:start]))
614
+ adjusted_end = len(re.sub("<.*?>", "", text[:end]))
615
+ adjusted_entity = (entity_name, (adjusted_start, adjusted_end))
616
+ return adjusted_entity
617
+
618
+
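A small worked example of `adjust_entity_positions`: the span (31, 41) covers " a snowman" in the raw tagged text; once `<grounding>` and `<phrase>` are stripped from the prefix, the phrase starts at character 12 of the cleaned text (the trailing end, 22, is later tightened to 21 by `_cleanup_spaces`).

```python
from transformers.models.kosmos2.processing_kosmos2 import adjust_entity_positions

text = (
    "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863>"
    "</object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
)
adjust_entity_positions((" a snowman", (31, 41)), text)
# -> (' a snowman', (12, 22))
```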
619
+ def _cleanup_spaces(text, entities):
620
+ """Remove the spaces around the text and the entities in it."""
621
+ new_text = text.strip()
622
+ leading_spaces = len(text) - len(text.lstrip())
623
+
624
+ new_entities = []
625
+ for entity_name, (start, end), bboxes in entities:
626
+ entity_name_leading_spaces = len(entity_name) - len(entity_name.lstrip())
627
+ entity_name_trailing_spaces = len(entity_name) - len(entity_name.rstrip())
628
+
629
+ start = start - leading_spaces + entity_name_leading_spaces
630
+ end = end - leading_spaces - entity_name_trailing_spaces
631
+ entity_name = entity_name.strip()
632
+
633
+ new_entities.append((entity_name, (start, end), bboxes))
634
+
635
+ return new_text, new_entities
636
+
637
+
638
+ # copied from https://github.com/microsoft/unilm/blob/97e4923e97d3ee10b57e97013556e3fd0d207a9b/kosmos-2/demo/decode_string.py#L77-L87
639
+ # (with format modifications)
640
+ def clean_text_and_extract_entities_with_bboxes(text, num_patches_per_side=32):
641
+ """Remove the tag tokens from `text`, extract entities in it with some cleaning up of white characters.
642
+
643
+ Examples:
644
+
645
+ ```python
646
+ >>> text = "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
647
+ >>> clean_text, entities = clean_text_and_extract_entities_with_bboxes(text)
648
+ >>> clean_text
649
+ 'An image of a snowman warming himself by a fire.'
650
+
651
+ >>> entities
652
+ [('a snowman', (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]), ('a fire', (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)])]
653
+ ```"""
654
+ # remove special fields (tag tokens, patch index tokens, etc.)
655
+ processed_text = re.sub("<.*?>", "", text)
656
+
657
+ entities_with_patch_indices = extract_entities_with_patch_indices(text)
658
+ entities = []
659
+ for item in entities_with_patch_indices:
660
+ entity, bboxes = item[0:2], item[2]
661
+ adjusted_entity = adjust_entity_positions(entity, text)
662
+ bboxes_in_coords = [patch_index_to_coordinate(bbox[0], bbox[1], num_patches_per_side) for bbox in bboxes]
663
+
664
+ entities.append(adjusted_entity + (bboxes_in_coords,))
665
+
666
+ return _cleanup_spaces(processed_text, entities)
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__init__.py ADDED
@@ -0,0 +1,114 @@
1
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
28
+ }
29
+
30
+ try:
31
+ if not is_sentencepiece_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
37
+
38
+ try:
39
+ if not is_tokenizers_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ _import_structure["modeling_llama"] = [
53
+ "LlamaForCausalLM",
54
+ "LlamaModel",
55
+ "LlamaPreTrainedModel",
56
+ "LlamaForSequenceClassification",
57
+ "LlamaForQuestionAnswering",
58
+ ]
59
+
60
+ try:
61
+ if not is_flax_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"]
67
+
68
+
69
+ if TYPE_CHECKING:
70
+ from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
71
+
72
+ try:
73
+ if not is_sentencepiece_available():
74
+ raise OptionalDependencyNotAvailable()
75
+ except OptionalDependencyNotAvailable:
76
+ pass
77
+ else:
78
+ from .tokenization_llama import LlamaTokenizer
79
+
80
+ try:
81
+ if not is_tokenizers_available():
82
+ raise OptionalDependencyNotAvailable()
83
+ except OptionalDependencyNotAvailable:
84
+ pass
85
+ else:
86
+ from .tokenization_llama_fast import LlamaTokenizerFast
87
+
88
+ try:
89
+ if not is_torch_available():
90
+ raise OptionalDependencyNotAvailable()
91
+ except OptionalDependencyNotAvailable:
92
+ pass
93
+ else:
94
+ from .modeling_llama import (
95
+ LlamaForCausalLM,
96
+ LlamaForQuestionAnswering,
97
+ LlamaForSequenceClassification,
98
+ LlamaModel,
99
+ LlamaPreTrainedModel,
100
+ )
101
+
102
+ try:
103
+ if not is_flax_available():
104
+ raise OptionalDependencyNotAvailable()
105
+ except OptionalDependencyNotAvailable:
106
+ pass
107
+ else:
108
+ from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel
109
+
110
+
111
+ else:
112
+ import sys
113
+
114
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
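A minimal sketch of what the `_LazyModule` indirection above buys at runtime: nothing under `models/llama` is actually imported until one of the exported names is first accessed (the torch-backed classes additionally require `torch` to be installed).

```python
# Importing the package is cheap; submodules are loaded lazily on first attribute access.
from transformers.models.llama import LlamaConfig, LlamaForCausalLM  # needs torch for the model class

config = LlamaConfig(num_hidden_layers=2, hidden_size=64, intermediate_size=128, num_attention_heads=4)
model = LlamaForCausalLM(config)  # tiny randomly-initialized model, handy for smoke tests
```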
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.67 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc ADDED
Binary file (7.86 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/convert_llama_weights_to_hf.cpython-310.pyc ADDED
Binary file (8.43 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc ADDED
Binary file (22.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc ADDED
Binary file (47.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc ADDED
Binary file (18 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama_fast.cpython-310.pyc ADDED
Binary file (10.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/configuration_llama.py ADDED
@@ -0,0 +1,191 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ LLaMA model configuration"""
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class LlamaConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
35
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
36
+ defaults will yield a similar configuration to that of the LLaMA-7B.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 32000):
44
+ Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
45
+ `input_ids` passed when calling [`LlamaModel`].
46
+ hidden_size (`int`, *optional*, defaults to 4096):
47
+ Dimension of the hidden representations.
48
+ intermediate_size (`int`, *optional*, defaults to 11008):
49
+ Dimension of the MLP representations.
50
+ num_hidden_layers (`int`, *optional*, defaults to 32):
51
+ Number of hidden layers in the Transformer decoder.
52
+ num_attention_heads (`int`, *optional*, defaults to 32):
53
+ Number of attention heads for each attention layer in the Transformer decoder.
54
+ num_key_value_heads (`int`, *optional*):
55
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
56
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
57
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
58
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
59
+ by meanpooling all the original heads within that group. For more details checkout [this
60
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
61
+ `num_attention_heads`.
62
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
63
+ The non-linear activation function (function or string) in the decoder.
64
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
65
+ The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
66
+ Llama 2 up to 4096, CodeLlama up to 16384.
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
70
+ The epsilon used by the rms normalization layers.
71
+ use_cache (`bool`, *optional*, defaults to `True`):
72
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
73
+ relevant if `config.is_decoder=True`.
74
+ pad_token_id (`int`, *optional*):
75
+ Padding token id.
76
+ bos_token_id (`int`, *optional*, defaults to 1):
77
+ Beginning of stream token id.
78
+ eos_token_id (`int`, *optional*, defaults to 2):
79
+ End of stream token id.
80
+ pretraining_tp (`int`, *optional*, defaults to 1):
81
+ Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
82
+ document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to understand more about it. This value is
83
+ necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
84
+ issue](https://github.com/pytorch/pytorch/issues/76232).
85
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
86
+ Whether to tie the input word embeddings with the output (LM head) weights.
87
+ rope_theta (`float`, *optional*, defaults to 10000.0):
88
+ The base period of the RoPE embeddings.
89
+ rope_scaling (`Dict`, *optional*):
90
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
91
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
92
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
93
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
94
+ these scaling strategies behave:
95
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
96
+ experimental feature, subject to breaking API changes in future versions.
97
+ attention_bias (`bool`, *optional*, defaults to `False`):
98
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
99
+ attention_dropout (`float`, *optional*, defaults to 0.0):
100
+ The dropout ratio for the attention probabilities.
101
+
102
+ ```python
103
+ >>> from transformers import LlamaModel, LlamaConfig
104
+
105
+ >>> # Initializing a LLaMA llama-7b style configuration
106
+ >>> configuration = LlamaConfig()
107
+
108
+ >>> # Initializing a model from the llama-7b style configuration
109
+ >>> model = LlamaModel(configuration)
110
+
111
+ >>> # Accessing the model configuration
112
+ >>> configuration = model.config
113
+ ```"""
114
+
115
+ model_type = "llama"
116
+ keys_to_ignore_at_inference = ["past_key_values"]
117
+
118
+ def __init__(
119
+ self,
120
+ vocab_size=32000,
121
+ hidden_size=4096,
122
+ intermediate_size=11008,
123
+ num_hidden_layers=32,
124
+ num_attention_heads=32,
125
+ num_key_value_heads=None,
126
+ hidden_act="silu",
127
+ max_position_embeddings=2048,
128
+ initializer_range=0.02,
129
+ rms_norm_eps=1e-6,
130
+ use_cache=True,
131
+ pad_token_id=None,
132
+ bos_token_id=1,
133
+ eos_token_id=2,
134
+ pretraining_tp=1,
135
+ tie_word_embeddings=False,
136
+ rope_theta=10000.0,
137
+ rope_scaling=None,
138
+ attention_bias=False,
139
+ attention_dropout=0.0,
140
+ **kwargs,
141
+ ):
142
+ self.vocab_size = vocab_size
143
+ self.max_position_embeddings = max_position_embeddings
144
+ self.hidden_size = hidden_size
145
+ self.intermediate_size = intermediate_size
146
+ self.num_hidden_layers = num_hidden_layers
147
+ self.num_attention_heads = num_attention_heads
148
+
149
+ # for backward compatibility
150
+ if num_key_value_heads is None:
151
+ num_key_value_heads = num_attention_heads
152
+
153
+ self.num_key_value_heads = num_key_value_heads
154
+ self.hidden_act = hidden_act
155
+ self.initializer_range = initializer_range
156
+ self.rms_norm_eps = rms_norm_eps
157
+ self.pretraining_tp = pretraining_tp
158
+ self.use_cache = use_cache
159
+ self.rope_theta = rope_theta
160
+ self.rope_scaling = rope_scaling
161
+ self._rope_scaling_validation()
162
+ self.attention_bias = attention_bias
163
+ self.attention_dropout = attention_dropout
164
+
165
+ super().__init__(
166
+ pad_token_id=pad_token_id,
167
+ bos_token_id=bos_token_id,
168
+ eos_token_id=eos_token_id,
169
+ tie_word_embeddings=tie_word_embeddings,
170
+ **kwargs,
171
+ )
172
+
173
+ def _rope_scaling_validation(self):
174
+ """
175
+ Validate the `rope_scaling` configuration.
176
+ """
177
+ if self.rope_scaling is None:
178
+ return
179
+
180
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
181
+ raise ValueError(
182
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
183
+ )
184
+ rope_scaling_type = self.rope_scaling.get("type", None)
185
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
186
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
187
+ raise ValueError(
188
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
189
+ )
190
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
191
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py ADDED
@@ -0,0 +1,339 @@
1
+ # Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import gc
16
+ import json
17
+ import os
18
+ import shutil
19
+ import warnings
20
+
21
+ import torch
22
+
23
+ from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
24
+
25
+
26
+ try:
27
+ from transformers import LlamaTokenizerFast
28
+ except ImportError as e:
29
+ warnings.warn(e)
30
+ warnings.warn(
31
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
32
+ )
33
+ LlamaTokenizerFast = None
34
+
35
+ """
36
+ Sample usage:
37
+
38
+ ```
39
+ python src/transformers/models/llama/convert_llama_weights_to_hf.py \
40
+ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
41
+ ```
42
+
43
+ Thereafter, models can be loaded via:
44
+
45
+ ```py
46
+ from transformers import LlamaForCausalLM, LlamaTokenizer
47
+
48
+ model = LlamaForCausalLM.from_pretrained("/output/path")
49
+ tokenizer = LlamaTokenizer.from_pretrained("/output/path")
50
+ ```
51
+
52
+ Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
53
+ come in several checkpoints, they each contain a part of each weight of the model, so we need to load them all in RAM).
54
+ """
55
+
56
+ NUM_SHARDS = {
57
+ "7B": 1,
58
+ "7Bf": 1,
59
+ "13B": 2,
60
+ "13Bf": 2,
61
+ "34B": 4,
62
+ "30B": 4,
63
+ "65B": 8,
64
+ "70B": 8,
65
+ "70Bf": 8,
66
+ }
67
+
68
+
69
+ def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
70
+ return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
71
+
72
+
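As a quick check of `compute_intermediate_size`: for the 7B model (`dim=4096`) with the default multipliers, it reproduces the `intermediate_size=11008` default of `LlamaConfig`.

```python
from transformers.models.llama.convert_llama_weights_to_hf import compute_intermediate_size

# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 -> 11008
compute_intermediate_size(4096)  # -> 11008
```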
73
+ def read_json(path):
74
+ with open(path, "r") as f:
75
+ return json.load(f)
76
+
77
+
78
+ def write_json(text, path):
79
+ with open(path, "w") as f:
80
+ json.dump(text, f)
81
+
82
+
83
+ def write_model(
84
+ model_path, input_base_path, model_size, tokenizer_path=None, safe_serialization=True, llama_version=1
85
+ ):
86
+ # for backward compatibility: previously, the repo needed to be called `my_repo/model_size`
87
+ if not os.path.isfile(os.path.join(input_base_path, "params.json")):
88
+ input_base_path = os.path.join(input_base_path, model_size)
89
+
90
+ os.makedirs(model_path, exist_ok=True)
91
+ tmp_model_path = os.path.join(model_path, "tmp")
92
+ os.makedirs(tmp_model_path, exist_ok=True)
93
+
94
+ params = read_json(os.path.join(input_base_path, "params.json"))
95
+ num_shards = NUM_SHARDS[model_size]
96
+ params = params.get("model", params)
97
+ n_layers = params["n_layers"]
98
+ n_heads = params["n_heads"]
99
+ n_heads_per_shard = n_heads // num_shards
100
+ dim = params["dim"]
101
+ dims_per_head = dim // n_heads
102
+ base = params.get("rope_theta", 10000.0)
103
+ inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
104
+ if base > 10000.0:
105
+ max_position_embeddings = 16384
106
+ else:
107
+ # Depending on the Llama version, the default max_position_embeddings has different values.
108
+ if llama_version == 1:
109
+ max_position_embeddings = 2048
110
+ elif llama_version == 2:
111
+ max_position_embeddings = 4096
112
+ else:
113
+ raise NotImplementedError(
114
+ f"Version {llama_version} of llama is not supported yet. "
115
+ "Current supported versions of llama are [1, 2]."
116
+ )
117
+
118
+ tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
119
+ if tokenizer_path is not None:
120
+ tokenizer = tokenizer_class(tokenizer_path)
121
+ tokenizer.save_pretrained(model_path)
122
+ vocab_size = tokenizer.vocab_size if tokenizer_path is not None else 32000
123
+
124
+ if params.get("n_kv_heads", None) is not None:
125
+ num_key_value_heads = params["n_kv_heads"] # for GQA / MQA
126
+ num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
127
+ key_value_dim = dim // num_key_value_heads
128
+ else: # compatibility with other checkpoints
129
+ num_key_value_heads = n_heads
130
+ num_local_key_value_heads = n_heads_per_shard
131
+ key_value_dim = dim
132
+
133
+ # permute for sliced rotary
134
+ def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
135
+ return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
136
+
137
+ print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
138
+ # Load weights
139
+ if num_shards == 1:
140
+ # Not sharded
141
+ # (The sharded implementation would also work, but this is simpler.)
142
+ loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
143
+ else:
144
+ # Sharded
145
+ loaded = [
146
+ torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
147
+ for i in range(num_shards)
148
+ ]
149
+ param_count = 0
150
+ index_dict = {"weight_map": {}}
151
+ for layer_i in range(n_layers):
152
+ filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
153
+ if num_shards == 1:
154
+ # Unsharded
155
+ state_dict = {
156
+ f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
157
+ loaded[f"layers.{layer_i}.attention.wq.weight"]
158
+ ),
159
+ f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
160
+ loaded[f"layers.{layer_i}.attention.wk.weight"]
161
+ ),
162
+ f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
163
+ f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
164
+ f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
165
+ f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
166
+ f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
167
+ f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
168
+ f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
169
+ }
170
+ else:
171
+ # Sharded
172
+ # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
173
+ # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
174
+ # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
175
+
176
+ state_dict = {
177
+ f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
178
+ f"layers.{layer_i}.attention_norm.weight"
179
+ ].clone(),
180
+ f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
181
+ f"layers.{layer_i}.ffn_norm.weight"
182
+ ].clone(),
183
+ }
184
+ state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
185
+ torch.cat(
186
+ [
187
+ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
188
+ for i in range(num_shards)
189
+ ],
190
+ dim=0,
191
+ ).reshape(dim, dim)
192
+ )
193
+ state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
194
+ torch.cat(
195
+ [
196
+ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
197
+ num_local_key_value_heads, dims_per_head, dim
198
+ )
199
+ for i in range(num_shards)
200
+ ],
201
+ dim=0,
202
+ ).reshape(key_value_dim, dim),
203
+ num_key_value_heads,
204
+ key_value_dim,
205
+ dim,
206
+ )
207
+ state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
208
+ [
209
+ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
210
+ num_local_key_value_heads, dims_per_head, dim
211
+ )
212
+ for i in range(num_shards)
213
+ ],
214
+ dim=0,
215
+ ).reshape(key_value_dim, dim)
216
+
217
+ state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
218
+ [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
219
+ )
220
+ state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
221
+ [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
222
+ )
223
+ state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
224
+ [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
225
+ )
226
+ state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
227
+ [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
228
+ )
229
+
230
+ state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
231
+ for k, v in state_dict.items():
232
+ index_dict["weight_map"][k] = filename
233
+ param_count += v.numel()
234
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
235
+
236
+ filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
237
+ if num_shards == 1:
238
+ # Unsharded
239
+ state_dict = {
240
+ "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
241
+ "model.norm.weight": loaded["norm.weight"],
242
+ "lm_head.weight": loaded["output.weight"],
243
+ }
244
+ else:
245
+ state_dict = {
246
+ "model.norm.weight": loaded[0]["norm.weight"],
247
+ "model.embed_tokens.weight": torch.cat(
248
+ [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
249
+ ),
250
+ "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
251
+ }
252
+
253
+ for k, v in state_dict.items():
254
+ index_dict["weight_map"][k] = filename
255
+ param_count += v.numel()
256
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
257
+
258
+ # Write configs
259
+ index_dict["metadata"] = {"total_size": param_count * 2}
260
+ write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
261
+ ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
262
+ multiple_of = params["multiple_of"] if "multiple_of" in params else 256
263
+ config = LlamaConfig(
264
+ hidden_size=dim,
265
+ intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
266
+ num_attention_heads=params["n_heads"],
267
+ num_hidden_layers=params["n_layers"],
268
+ rms_norm_eps=params["norm_eps"],
269
+ num_key_value_heads=num_key_value_heads,
270
+ vocab_size=vocab_size,
271
+ rope_theta=base,
272
+ max_position_embeddings=max_position_embeddings,
273
+ )
274
+ config.save_pretrained(tmp_model_path)
275
+
276
+ # Make space so we can load the model properly now.
277
+ del state_dict
278
+ del loaded
279
+ gc.collect()
280
+
281
+ print("Loading the checkpoint in a Llama model.")
282
+ model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True)
283
+ # Avoid saving this as part of the config.
284
+ del model.config._name_or_path
285
+ model.config.torch_dtype = torch.float16
286
+ print("Saving in the Transformers format.")
287
+ model.save_pretrained(model_path, safe_serialization=safe_serialization)
288
+ shutil.rmtree(tmp_model_path)
289
+
290
+
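A toy demonstration of what the nested `permute` helper in `write_model` does (toy sizes chosen purely for illustration): for a single head it reorders the weight rows so that the first and second halves of the head dimension are interleaved, which is the layout the Hugging Face rotary implementation expects.

```python
import torch

w = torch.arange(4 * 3, dtype=torch.float32).reshape(4, 3)  # head dim 4, input dim 3
# Same computation as permute(w, n_heads=1, dim1=4, dim2=3) above.
permuted = w.view(1, 4 // 2, 2, 3).transpose(1, 2).reshape(4, 3)
print(permuted[:, 0])  # tensor([0., 6., 3., 9.]) -> rows reordered as 0, 2, 1, 3
```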
291
+ def write_tokenizer(tokenizer_path, input_tokenizer_path):
292
+ # Initialize the tokenizer based on the `spm` model
293
+ tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
294
+ print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
295
+ tokenizer = tokenizer_class(input_tokenizer_path)
296
+ tokenizer.save_pretrained(tokenizer_path)
297
+
298
+
299
+ def main():
300
+ parser = argparse.ArgumentParser()
301
+ parser.add_argument(
302
+ "--input_dir",
303
+ help="Location of LLaMA weights, which contains tokenizer.model and model folders",
304
+ )
305
+ parser.add_argument(
306
+ "--model_size",
307
+ choices=["7B", "7Bf", "13B", "13Bf", "30B", "34B", "65B", "70B", "70Bf", "tokenizer_only"],
308
+ help="'f' models correspond to the finetuned versions, and are specific to the Llama2 official release. For more details on Llama2, checkout the original repo: https://huggingface.co/meta-llama",
309
+ )
310
+ parser.add_argument(
311
+ "--output_dir",
312
+ help="Location to write HF model and tokenizer",
313
+ )
314
+ parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
315
+ # Different Llama versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used.
316
+ parser.add_argument(
317
+ "--llama_version",
318
+ choices=[1, 2],
319
+ default=1,
320
+ type=int,
321
+ help="Version of the Llama model to convert. Currently supports Llama1 and Llama2. Controls the context size",
322
+ )
323
+ args = parser.parse_args()
324
+ spm_path = os.path.join(args.input_dir, "tokenizer.model")
325
+ if args.model_size != "tokenizer_only":
326
+ write_model(
327
+ model_path=args.output_dir,
328
+ input_base_path=args.input_dir,
329
+ model_size=args.model_size,
330
+ safe_serialization=args.safe_serialization,
331
+ tokenizer_path=spm_path,
332
+ llama_version=args.llama_version,
333
+ )
334
+ else:
335
+ write_tokenizer(args.output_dir, spm_path)
336
+
337
+
338
+ if __name__ == "__main__":
339
+ main()
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/modeling_flax_llama.py ADDED
@@ -0,0 +1,749 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta AI, EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """Flax LLaMA model."""
21
+ from functools import partial
22
+ from typing import Optional, Tuple
23
+
24
+ import flax.linen as nn
25
+ import jax
26
+ import jax.numpy as jnp
27
+ import numpy as np
28
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
29
+ from flax.linen import combine_masks, make_causal_mask
30
+ from flax.linen.attention import dot_product_attention_weights
31
+ from flax.traverse_util import flatten_dict, unflatten_dict
32
+ from jax import lax
33
+
34
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
35
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_llama import LlamaConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CONFIG_FOR_DOC = "LlamaConfig"
43
+ _CHECKPOINT_FOR_DOC = "afmck/testing-llama-tiny"
44
+ _REAL_CHECKPOINT_FOR_DOC = "openlm-research/open_llama_3b_v2"
45
+
46
+ LLAMA_START_DOCSTRING = r"""
47
+
48
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
49
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
50
+ etc.)
51
+
52
+ This model is also a Flax Linen
53
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
54
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
55
+
56
+ Finally, this model supports inherent JAX features such as:
57
+
58
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
59
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
60
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
61
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
62
+
63
+ Parameters:
64
+ config ([`LlamaConfig`]): Model configuration class with all the parameters of the model.
65
+ Initializing with a config file does not load the weights associated with the model, only the
66
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
67
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
68
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or
69
+ `jax.numpy.bfloat16`.
70
+
71
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
72
+ specified all the computation will be performed with the given `dtype`.
73
+
74
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
75
+ parameters.**
76
+
77
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
78
+ [`~FlaxPreTrainedModel.to_bf16`].
79
+ """
80
+
81
+ LLAMA_INPUTS_DOCSTRING = r"""
82
+ Args:
83
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
84
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
85
+ it.
86
+
87
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
88
+ [`PreTrainedTokenizer.__call__`] for details.
89
+
90
+ [What are input IDs?](../glossary#input-ids)
91
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
92
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
93
+
94
+ - 1 for tokens that are **not masked**,
95
+ - 0 for tokens that are **masked**.
96
+
97
+ [What are attention masks?](../glossary#attention-mask)
98
+
99
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
100
+ [`PreTrainedTokenizer.__call__`] for details.
101
+
102
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
103
+ `past_key_values`).
104
+
105
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
106
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
107
+ information on the default strategy.
108
+
109
+ - 1 indicates the head is **not masked**,
110
+ - 0 indicates the head is **masked**.
111
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
112
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
113
+ config.n_positions - 1]`.
114
+
115
+ [What are position IDs?](../glossary#position-ids)
116
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
117
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
118
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
119
+ output_attentions (`bool`, *optional*):
120
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
121
+ tensors for more detail.
122
+ output_hidden_states (`bool`, *optional*):
123
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
124
+ more detail.
125
+ return_dict (`bool`, *optional*):
126
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
127
+ """
128
+
129
+
130
+ def create_sinusoidal_positions(num_pos, dim):
131
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
132
+ freqs = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
133
+
134
+ emb = np.concatenate((freqs, freqs), axis=-1)
135
+ out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1)
136
+ return jnp.array(out[:, :, :num_pos])
137
+
138
+
139
+ def rotate_half(tensor):
140
+ """Rotates half the hidden dims of the input."""
141
+ rotate_half_tensor = jnp.concatenate(
142
+ (-tensor[..., tensor.shape[-1] // 2 :], tensor[..., : tensor.shape[-1] // 2]), axis=-1
143
+ )
144
+ return rotate_half_tensor
145
+
146
+
147
+ def apply_rotary_pos_emb(tensor, sin_pos, cos_pos):
148
+ return (tensor * cos_pos) + (rotate_half(tensor) * sin_pos)
149
+
150
+
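A quick check of the two helpers above: `rotate_half` negates the second half of the last dimension and swaps it in front of the first half (the standard RoPE rotation trick), and `apply_rotary_pos_emb` then combines it elementwise with the cached sin/cos tables.

```python
import jax.numpy as jnp

from transformers.models.llama.modeling_flax_llama import rotate_half

x = jnp.array([1.0, 2.0, 3.0, 4.0])
rotate_half(x)  # -> Array([-3., -4., 1., 2.], dtype=float32)
```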
151
+ class FlaxLlamaRMSNorm(nn.Module):
152
+ config: LlamaConfig
153
+ dtype: jnp.dtype = jnp.float32
154
+
155
+ def setup(self):
156
+ self.epsilon = self.config.rms_norm_eps
157
+ self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size)
158
+
159
+ def __call__(self, hidden_states):
160
+ variance = jnp.asarray(hidden_states, dtype=jnp.float32)
161
+ variance = jnp.power(variance, 2)
162
+ variance = variance.mean(-1, keepdims=True)
163
+ # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt`
164
+ hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon)
165
+
166
+ return self.weight * jnp.asarray(hidden_states, dtype=self.dtype)
167
+
168
+
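The arithmetic inside `FlaxLlamaRMSNorm.__call__`, written out for a single vector with the learned `weight` taken as all ones (a sketch of the math, not a test of the module itself):

```python
import jax.numpy as jnp

x = jnp.array([3.0, 4.0])
eps = 1e-6
variance = jnp.mean(x.astype(jnp.float32) ** 2)  # (9 + 16) / 2 = 12.5
normalized = x / jnp.sqrt(variance + eps)        # ~[0.8485, 1.1314]
```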
169
+ class FlaxLlamaRotaryEmbedding(nn.Module):
170
+ config: LlamaConfig
171
+ dtype: jnp.dtype = jnp.float32
172
+
173
+ def setup(self):
174
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
175
+ self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim)
176
+
177
+ def __call__(self, key, query, position_ids):
178
+ sincos = self.sincos[position_ids]
179
+ sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1)
180
+
181
+ key = apply_rotary_pos_emb(key, sin_pos, cos_pos)
182
+ query = apply_rotary_pos_emb(query, sin_pos, cos_pos)
183
+
184
+ key = jnp.asarray(key, dtype=self.dtype)
185
+ query = jnp.asarray(query, dtype=self.dtype)
186
+
187
+ return key, query
188
+
189
+
190
+ class FlaxLlamaAttention(nn.Module):
191
+ config: LlamaConfig
192
+ dtype: jnp.dtype = jnp.float32
193
+ causal: bool = True
194
+ is_cross_attention: bool = False
195
+
196
+ def setup(self):
197
+ config = self.config
198
+ self.embed_dim = config.hidden_size
199
+ self.num_heads = config.num_attention_heads
200
+ self.head_dim = self.embed_dim // self.num_heads
201
+ self.num_key_value_heads = config.num_key_value_heads
202
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
203
+ self.attention_softmax_in_fp32 = self.dtype is not jnp.float32
204
+
205
+ dense = partial(
206
+ nn.Dense,
207
+ use_bias=config.attention_bias,
208
+ dtype=self.dtype,
209
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
210
+ )
211
+
212
+ self.q_proj = dense(self.num_heads * self.head_dim)
213
+ self.k_proj = dense(self.num_key_value_heads * self.head_dim)
214
+ self.v_proj = dense(self.num_key_value_heads * self.head_dim)
215
+ self.o_proj = dense(self.embed_dim)
216
+ if (self.head_dim * self.num_heads) != self.embed_dim:
217
+ raise ValueError(
218
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.embed_dim}"
219
+ f" and `num_heads`: {self.num_heads})."
220
+ )
221
+
222
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
223
+ self.rotary_emb = FlaxLlamaRotaryEmbedding(config, dtype=self.dtype)
224
+
225
+ def _split_heads(self, hidden_states, num_heads):
226
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
227
+
228
+ def _merge_heads(self, hidden_states):
229
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
230
+
231
+ @nn.compact
232
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache
233
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
234
+ """
235
+ This function takes projected key, value states from a single input token and concatenates the states to cached
236
+ states from previous steps. This function is slightly adapted from the official Flax repository:
237
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
238
+ """
239
+ # detect if we're initializing by absence of existing cache data.
240
+ is_initialized = self.has_variable("cache", "cached_key")
241
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
242
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
243
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
244
+
245
+ if is_initialized:
246
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
247
+ # update key, value caches with our new 1d spatial slices
248
+ cur_index = cache_index.value
249
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
250
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
251
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
252
+ cached_key.value = key
253
+ cached_value.value = value
254
+ num_updated_cache_vectors = query.shape[1]
255
+ cache_index.value = cache_index.value + num_updated_cache_vectors
256
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
257
+ pad_mask = jnp.broadcast_to(
258
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
259
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
260
+ )
261
+ attention_mask = combine_masks(pad_mask, attention_mask)
262
+ return key, value, attention_mask
263
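A toy illustration of the cache update pattern used in `_concatenate_to_cache`: `lax.dynamic_update_slice` writes the freshly projected key/value states into a preallocated buffer at the current cache index (shapes and values here are illustrative only):

import jax.numpy as jnp
from jax import lax

cache = jnp.zeros((1, 8, 2, 4))              # (batch, max_length, num_heads, head_dim)
new_key = jnp.ones((1, 1, 2, 4))             # states for one freshly decoded position
cur_index = 3                                # positions 0..2 are assumed to be filled already
cache = lax.dynamic_update_slice(cache, new_key, (0, cur_index, 0, 0))
print(cache[0, :, 0, 0])                     # only position 3 is non-zero in this toy buffer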
+
264
+ def __call__(
265
+ self,
266
+ hidden_states,
267
+ attention_mask,
268
+ position_ids,
269
+ deterministic: bool = True,
270
+ init_cache: bool = False,
271
+ output_attentions: bool = False,
272
+ ):
273
+ query = self.q_proj(hidden_states)
274
+ key = self.k_proj(hidden_states)
275
+ value = self.v_proj(hidden_states)
276
+
277
+ query = self._split_heads(query, self.num_heads)
278
+ key = self._split_heads(key, self.num_key_value_heads)
279
+ value = self._split_heads(value, self.num_key_value_heads)
280
+
281
+ key, query = self.rotary_emb(key, query, position_ids)
282
+
283
+ query_length, key_length = query.shape[1], key.shape[1]
284
+
285
+ if self.has_variable("cache", "cached_key"):
286
+ mask_shift = self.variables["cache"]["cache_index"]
287
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
288
+ causal_mask = lax.dynamic_slice(
289
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
290
+ )
291
+ else:
292
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
293
+
294
+ batch_size = hidden_states.shape[0]
295
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
296
+
297
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
298
+ attention_mask = combine_masks(attention_mask, causal_mask)
299
+
300
+ dropout_rng = None
301
+ if not deterministic and self.config.attention_dropout > 0.0:
302
+ dropout_rng = self.make_rng("dropout")
303
+
304
+ # During fast autoregressive decoding, we feed one position at a time,
305
+ # and cache the keys and values step by step.
306
+ if self.has_variable("cache", "cached_key") or init_cache:
307
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
308
+
309
+ key = jnp.repeat(key, self.num_key_value_groups, axis=2)
310
+ value = jnp.repeat(value, self.num_key_value_groups, axis=2)
311
+
312
+ # transform boolean mask into float mask
313
+ attention_bias = lax.select(
314
+ attention_mask > 0,
315
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
316
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
317
+ )
318
+
319
+ # usual dot product attention
320
+ attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype
321
+ attn_weights = dot_product_attention_weights(
322
+ query,
323
+ key,
324
+ bias=attention_bias,
325
+ dropout_rng=dropout_rng,
326
+ dropout_rate=self.config.attention_dropout,
327
+ deterministic=deterministic,
328
+ dtype=attention_dtype,
329
+ )
330
+
331
+ if self.attention_softmax_in_fp32:
332
+ attn_weights = attn_weights.astype(self.dtype)
333
+
334
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
335
+ attn_output = self._merge_heads(attn_output)
336
+ attn_output = self.o_proj(attn_output)
337
+
338
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
339
+ return outputs
340
+
341
+
342
+ class FlaxLlamaMLP(nn.Module):
343
+ config: LlamaConfig
344
+ dtype: jnp.dtype = jnp.float32
345
+
346
+ def setup(self):
347
+ embed_dim = self.config.hidden_size
348
+ inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim
349
+
350
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
351
+ self.act = ACT2FN[self.config.hidden_act]
352
+
353
+ self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
354
+ self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
355
+ self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
356
+
357
+ def __call__(self, hidden_states):
358
+ up_proj_states = self.up_proj(hidden_states)
359
+ gate_states = self.act(self.gate_proj(hidden_states))
360
+
361
+ hidden_states = self.down_proj(up_proj_states * gate_states)
362
+ return hidden_states
363
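The MLP above is the gated (SwiGLU-style) feed-forward used by Llama: the activated gate projection multiplies the up projection before being projected back down. A minimal functional sketch, assuming `silu` as the configured `hidden_act`:

import jax
import jax.numpy as jnp

def toy_gated_mlp(x, w_gate, w_up, w_down):
    # same dataflow as above: down_proj(act(gate_proj(x)) * up_proj(x))
    return (jax.nn.silu(x @ w_gate) * (x @ w_up)) @ w_down

x = jax.random.normal(jax.random.PRNGKey(0), (2, 16))
w_gate = jnp.ones((16, 32))
w_up = jnp.ones((16, 32))
w_down = jnp.ones((32, 16))
print(toy_gated_mlp(x, w_gate, w_up, w_down).shape)  # (2, 16)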
+
364
+
365
+ class FlaxLlamaDecoderLayer(nn.Module):
366
+ config: LlamaConfig
367
+ dtype: jnp.dtype = jnp.float32
368
+
369
+ def setup(self):
370
+ self.input_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
371
+ self.self_attn = FlaxLlamaAttention(self.config, dtype=self.dtype)
372
+ self.post_attention_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
373
+ self.mlp = FlaxLlamaMLP(self.config, dtype=self.dtype)
374
+
375
+ def __call__(
376
+ self,
377
+ hidden_states,
378
+ attention_mask=None,
379
+ position_ids=None,
380
+ deterministic: bool = True,
381
+ init_cache: bool = False,
382
+ output_attentions: bool = False,
383
+ ):
384
+ residual = hidden_states
385
+ hidden_states = self.input_layernorm(hidden_states)
386
+ outputs = self.self_attn(
387
+ hidden_states,
388
+ attention_mask=attention_mask,
389
+ position_ids=position_ids,
390
+ deterministic=deterministic,
391
+ init_cache=init_cache,
392
+ output_attentions=output_attentions,
393
+ )
394
+ # residual connection
395
+ attn_output = outputs[0]
396
+ hidden_states = residual + attn_output
397
+
398
+ residual = hidden_states
399
+ hidden_states = self.post_attention_layernorm(hidden_states)
400
+ hidden_states = self.mlp(hidden_states)
401
+ # residual connection
402
+ hidden_states = residual + hidden_states
403
+
404
+ return (hidden_states,) + outputs[1:]
405
+
406
+
407
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoPreTrainedModel with GPTNeo->Llama, GPT_NEO->LLAMA, transformer->model
408
+ class FlaxLlamaPreTrainedModel(FlaxPreTrainedModel):
409
+ """
410
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
411
+ models.
412
+ """
413
+
414
+ config_class = LlamaConfig
415
+ base_model_prefix = "model"
416
+ module_class: nn.Module = None
417
+
418
+ def __init__(
419
+ self,
420
+ config: LlamaConfig,
421
+ input_shape: Tuple = (1, 1),
422
+ seed: int = 0,
423
+ dtype: jnp.dtype = jnp.float32,
424
+ _do_init: bool = True,
425
+ **kwargs,
426
+ ):
427
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
428
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
429
+
430
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
431
+ # init input tensors
432
+ input_ids = jnp.zeros(input_shape, dtype="i4")
433
+ attention_mask = jnp.ones_like(input_ids)
434
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
435
+ params_rng, dropout_rng = jax.random.split(rng)
436
+ rngs = {"params": params_rng, "dropout": dropout_rng}
437
+
438
+ random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
439
+
440
+ if params is not None:
441
+ random_params = flatten_dict(unfreeze(random_params))
442
+ params = flatten_dict(unfreeze(params))
443
+ for missing_key in self._missing_keys:
444
+ params[missing_key] = random_params[missing_key]
445
+ self._missing_keys = set()
446
+ return freeze(unflatten_dict(params))
447
+ else:
448
+ return random_params
449
+
450
+ def init_cache(self, batch_size, max_length):
451
+ r"""
452
+ Args:
453
+ batch_size (`int`):
454
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
455
+ max_length (`int`):
456
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
457
+ cache.
458
+ """
459
+ # init input variables to retrieve cache
460
+ input_ids = jnp.ones((batch_size, max_length))
461
+ attention_mask = jnp.ones_like(input_ids)
462
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
463
+
464
+ init_variables = self.module.init(
465
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
466
+ )
467
+ return unfreeze(init_variables["cache"])
468
+
469
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
470
+ def __call__(
471
+ self,
472
+ input_ids,
473
+ attention_mask=None,
474
+ position_ids=None,
475
+ params: dict = None,
476
+ past_key_values: dict = None,
477
+ dropout_rng: jax.random.PRNGKey = None,
478
+ train: bool = False,
479
+ output_attentions: Optional[bool] = None,
480
+ output_hidden_states: Optional[bool] = None,
481
+ return_dict: Optional[bool] = None,
482
+ ):
483
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
484
+ output_hidden_states = (
485
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
486
+ )
487
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
488
+
489
+ batch_size, sequence_length = input_ids.shape
490
+
491
+ if position_ids is None:
492
+ if past_key_values is not None:
493
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
494
+
495
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
496
+
497
+ if attention_mask is None:
498
+ attention_mask = jnp.ones((batch_size, sequence_length))
499
+
500
+ # Handle any PRNG if needed
501
+ rngs = {}
502
+ if dropout_rng is not None:
503
+ rngs["dropout"] = dropout_rng
504
+
505
+ inputs = {"params": params or self.params}
506
+
507
+ # if past_key_values are passed then the cache is already initialized; a private flag init_cache has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be changed by the FlaxLlamaAttention module
508
+ if past_key_values:
509
+ inputs["cache"] = past_key_values
510
+ mutable = ["cache"]
511
+ else:
512
+ mutable = False
513
+
514
+ outputs = self.module.apply(
515
+ inputs,
516
+ jnp.array(input_ids, dtype="i4"),
517
+ jnp.array(attention_mask, dtype="i4"),
518
+ jnp.array(position_ids, dtype="i4"),
519
+ not train,
520
+ False,
521
+ output_attentions,
522
+ output_hidden_states,
523
+ return_dict,
524
+ rngs=rngs,
525
+ mutable=mutable,
526
+ )
527
+
528
+ # add updated cache to model output
529
+ if past_key_values is not None and return_dict:
530
+ outputs, past_key_values = outputs
531
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
532
+ return outputs
533
+ elif past_key_values is not None and not return_dict:
534
+ outputs, past_key_values = outputs
535
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
536
+
537
+ return outputs
538
+
539
+
540
+ class FlaxLlamaLayerCollection(nn.Module):
541
+ config: LlamaConfig
542
+ dtype: jnp.dtype = jnp.float32
543
+
544
+ def setup(self):
545
+ self.blocks = [
546
+ FlaxLlamaDecoderLayer(self.config, dtype=self.dtype, name=str(i))
547
+ for i in range(self.config.num_hidden_layers)
548
+ ]
549
+
550
+ def __call__(
551
+ self,
552
+ hidden_states,
553
+ attention_mask=None,
554
+ position_ids=None,
555
+ deterministic: bool = True,
556
+ init_cache: bool = False,
557
+ output_attentions: bool = False,
558
+ output_hidden_states: bool = False,
559
+ return_dict: bool = False,
560
+ ):
561
+ all_attentions = () if output_attentions else None
562
+ all_hidden_states = () if output_hidden_states else None
563
+
564
+ for block in self.blocks:
565
+ if output_hidden_states:
566
+ all_hidden_states += (hidden_states,)
567
+ layer_outputs = block(
568
+ hidden_states,
569
+ attention_mask=attention_mask,
570
+ position_ids=position_ids,
571
+ deterministic=deterministic,
572
+ init_cache=init_cache,
573
+ output_attentions=output_attentions,
574
+ )
575
+ hidden_states = layer_outputs[0]
576
+
577
+ if output_attentions:
578
+ all_attentions += (layer_outputs[1],)
579
+
580
+ # this contains possible `None` values - `FlaxLlamaModule` will filter them out
581
+ outputs = (hidden_states, all_hidden_states, all_attentions)
582
+
583
+ return outputs
584
+
585
+
586
+ class FlaxLlamaModule(nn.Module):
587
+ config: LlamaConfig
588
+ dtype: jnp.dtype = jnp.float32
589
+
590
+ def setup(self):
591
+ self.hidden_size = self.config.hidden_size
592
+ embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
593
+ self.embed_tokens = nn.Embed(
594
+ self.config.vocab_size,
595
+ self.hidden_size,
596
+ embedding_init=embedding_init,
597
+ dtype=self.dtype,
598
+ )
599
+ self.layers = FlaxLlamaLayerCollection(self.config, dtype=self.dtype)
600
+ self.norm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
601
+
602
+ def __call__(
603
+ self,
604
+ input_ids,
605
+ attention_mask=None,
606
+ position_ids=None,
607
+ deterministic=True,
608
+ init_cache: bool = False,
609
+ output_attentions: bool = False,
610
+ output_hidden_states: bool = False,
611
+ return_dict: bool = True,
612
+ ):
613
+ input_embeds = self.embed_tokens(input_ids.astype("i4"))
614
+
615
+ outputs = self.layers(
616
+ input_embeds,
617
+ position_ids=position_ids,
618
+ attention_mask=attention_mask,
619
+ deterministic=deterministic,
620
+ init_cache=init_cache,
621
+ output_attentions=output_attentions,
622
+ output_hidden_states=output_hidden_states,
623
+ return_dict=return_dict,
624
+ )
625
+
626
+ hidden_states = outputs[0]
627
+ hidden_states = self.norm(hidden_states)
628
+
629
+ if output_hidden_states:
630
+ all_hidden_states = outputs[1] + (hidden_states,)
631
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
632
+ else:
633
+ outputs = (hidden_states,) + outputs[1:]
634
+
635
+ if not return_dict:
636
+ return tuple(v for v in outputs if v is not None)
637
+
638
+ return FlaxBaseModelOutput(
639
+ last_hidden_state=hidden_states,
640
+ hidden_states=outputs[1],
641
+ attentions=outputs[-1],
642
+ )
643
+
644
+
645
+ @add_start_docstrings(
646
+ "The bare Llama Model transformer outputting raw hidden-states without any specific head on top.",
647
+ LLAMA_START_DOCSTRING,
648
+ )
649
+ class FlaxLlamaModel(FlaxLlamaPreTrainedModel):
650
+ module_class = FlaxLlamaModule
651
+
652
+
653
+ append_call_sample_docstring(
654
+ FlaxLlamaModel,
655
+ _CHECKPOINT_FOR_DOC,
656
+ FlaxBaseModelOutput,
657
+ _CONFIG_FOR_DOC,
658
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
659
+ )
660
+
661
+
662
+ class FlaxLlamaForCausalLMModule(nn.Module):
663
+ config: LlamaConfig
664
+ dtype: jnp.dtype = jnp.float32
665
+
666
+ def setup(self):
667
+ self.model = FlaxLlamaModule(self.config, dtype=self.dtype)
668
+ self.lm_head = nn.Dense(
669
+ self.config.vocab_size,
670
+ use_bias=False,
671
+ dtype=self.dtype,
672
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
673
+ )
674
+
675
+ def __call__(
676
+ self,
677
+ input_ids,
678
+ attention_mask=None,
679
+ position_ids=None,
680
+ deterministic: bool = True,
681
+ init_cache: bool = False,
682
+ output_attentions: bool = False,
683
+ output_hidden_states: bool = False,
684
+ return_dict: bool = True,
685
+ ):
686
+ outputs = self.model(
687
+ input_ids,
688
+ position_ids=position_ids,
689
+ attention_mask=attention_mask,
690
+ deterministic=deterministic,
691
+ init_cache=init_cache,
692
+ output_attentions=output_attentions,
693
+ output_hidden_states=output_hidden_states,
694
+ return_dict=return_dict,
695
+ )
696
+
697
+ hidden_states = outputs[0]
698
+ lm_logits = self.lm_head(hidden_states)
699
+
700
+ if not return_dict:
701
+ return (lm_logits,) + outputs[1:]
702
+
703
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
704
+
705
+
706
+ @add_start_docstrings(
707
+ """
708
+ The Llama Model transformer with a language modeling head (linear layer) on top.
709
+ """,
710
+ LLAMA_START_DOCSTRING,
711
+ )
712
+ # Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Llama
713
+ class FlaxLlamaForCausalLM(FlaxLlamaPreTrainedModel):
714
+ module_class = FlaxLlamaForCausalLMModule
715
+
716
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
717
+ # initializing the cache
718
+ batch_size, seq_length = input_ids.shape
719
+
720
+ past_key_values = self.init_cache(batch_size, max_length)
721
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
722
+ # But since Llama uses a causal mask, those positions are masked anyway.
723
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
724
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
725
+ if attention_mask is not None:
726
+ position_ids = attention_mask.cumsum(axis=-1) - 1
727
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
728
+ else:
729
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
730
+
731
+ return {
732
+ "past_key_values": past_key_values,
733
+ "attention_mask": extended_attention_mask,
734
+ "position_ids": position_ids,
735
+ }
736
+
737
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
738
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
739
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
740
+ return model_kwargs
741
+
742
+
743
+ append_call_sample_docstring(
744
+ FlaxLlamaForCausalLM,
745
+ _CHECKPOINT_FOR_DOC,
746
+ FlaxCausalLMOutput,
747
+ _CONFIG_FOR_DOC,
748
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
749
+ )
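A short, hedged usage sketch for the Flax causal LM defined above; the checkpoint name is illustrative and any Llama checkpoint with Flax weights should behave similarly:

from transformers import AutoTokenizer, FlaxLlamaForCausalLM

tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_3b_v2")
model = FlaxLlamaForCausalLM.from_pretrained("openlm-research/open_llama_3b_v2")

inputs = tokenizer("The capital of France is", return_tensors="np")
outputs = model.generate(inputs["input_ids"], max_length=16)
print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))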
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py ADDED
@@ -0,0 +1,1566 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch LLaMA model."""
21
+
22
+ import math
23
+ import warnings
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+
32
+ from ...activations import ACT2FN
33
+ from ...cache_utils import Cache, DynamicCache, StaticCache
34
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
35
+ from ...modeling_outputs import (
36
+ BaseModelOutputWithPast,
37
+ CausalLMOutputWithPast,
38
+ QuestionAnsweringModelOutput,
39
+ SequenceClassifierOutputWithPast,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS
43
+ from ...utils import (
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ is_flash_attn_2_available,
47
+ is_flash_attn_greater_or_equal_2_10,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_llama import LlamaConfig
52
+
53
+
54
+ if is_flash_attn_2_available():
55
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
56
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
57
+
58
+
59
+ logger = logging.get_logger(__name__)
60
+
61
+ _CONFIG_FOR_DOC = "LlamaConfig"
62
+
63
+
64
+ def _get_unpad_data(attention_mask):
65
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
66
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
67
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
68
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
69
+ return (
70
+ indices,
71
+ cu_seqlens,
72
+ max_seqlen_in_batch,
73
+ )
74
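A worked toy example of what `_get_unpad_data` computes for a padded batch (the values below follow directly from the function body above):

import torch
import torch.nn.functional as F

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=torch.int32)
seqlens = mask.sum(dim=-1, dtype=torch.int32)                          # tensor([3, 2])
indices = torch.nonzero(mask.flatten(), as_tuple=False).flatten()     # positions of real tokens
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
print(indices.tolist(), cu_seqlens.tolist())                           # [0, 1, 2, 4, 5] [0, 3, 5]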
+
75
+
76
+ class LlamaRMSNorm(nn.Module):
77
+ def __init__(self, hidden_size, eps=1e-6):
78
+ """
79
+ LlamaRMSNorm is equivalent to T5LayerNorm
80
+ """
81
+ super().__init__()
82
+ self.weight = nn.Parameter(torch.ones(hidden_size))
83
+ self.variance_epsilon = eps
84
+
85
+ def forward(self, hidden_states):
86
+ input_dtype = hidden_states.dtype
87
+ hidden_states = hidden_states.to(torch.float32)
88
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
89
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
90
+ return self.weight * hidden_states.to(input_dtype)
91
+
92
+
93
+ ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)
94
+
95
+
96
+ class LlamaRotaryEmbedding(nn.Module):
97
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
98
+ super().__init__()
99
+ self.scaling_factor = scaling_factor
100
+ self.dim = dim
101
+ self.max_position_embeddings = max_position_embeddings
102
+ self.base = base
103
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
104
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
105
+ # For BC we register cos and sin cached
106
+ self.max_seq_len_cached = max_position_embeddings
107
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
108
+ t = t / self.scaling_factor
109
+ freqs = torch.outer(t, self.inv_freq)
110
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
111
+ emb = torch.cat((freqs, freqs), dim=-1)
112
+ self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False)
113
+ self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False)
114
+
115
+ @property
116
+ def sin_cached(self):
117
+ logger.warning_once(
118
+ "The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
119
+ "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class"
120
+ )
121
+ return self._sin_cached
122
+
123
+ @property
124
+ def cos_cached(self):
125
+ logger.warning_once(
126
+ "The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
127
+ "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class"
128
+ )
129
+ return self._cos_cached
130
+
131
+ @torch.no_grad()
132
+ def forward(self, x, position_ids):
133
+ # x: [bs, num_attention_heads, seq_len, head_size]
134
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
135
+ position_ids_expanded = position_ids[:, None, :].float()
136
+ # Force float32 since bfloat16 loses precision on long contexts
137
+ # See https://github.com/huggingface/transformers/pull/29285
138
+ device_type = x.device.type
139
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
140
+ with torch.autocast(device_type=device_type, enabled=False):
141
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
142
+ emb = torch.cat((freqs, freqs), dim=-1)
143
+ cos = emb.cos()
144
+ sin = emb.sin()
145
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
146
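For intuition, the frequency table behind the rotary embedding above can be reproduced standalone in a few lines (same `inv_freq`/`outer` construction, toy sizes):

import torch

dim, base = 8, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
positions = torch.arange(6).float()
freqs = torch.outer(positions, inv_freq)          # (seq_len, dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)           # (seq_len, dim); the cos/sin caches come from this
print(emb.cos().shape, emb.sin().shape)           # torch.Size([6, 8]) torch.Size([6, 8])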
+
147
+
148
+ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
149
+ """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
150
+
151
+ def forward(self, x, position_ids):
152
+ # difference to the original RoPE: a scaling factor is applied to the position ids
153
+ position_ids = position_ids.float() / self.scaling_factor
154
+ cos, sin = super().forward(x, position_ids)
155
+ return cos, sin
156
+
157
+
158
+ class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
159
+ """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
160
+
161
+ def forward(self, x, position_ids):
162
+ # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length
163
+ seq_len = torch.max(position_ids) + 1
164
+ if seq_len > self.max_position_embeddings:
165
+ base = self.base * (
166
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
167
+ ) ** (self.dim / (self.dim - 2))
168
+ inv_freq = 1.0 / (
169
+ base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim)
170
+ )
171
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: this may break with compilation
172
+
173
+ cos, sin = super().forward(x, position_ids)
174
+ return cos, sin
175
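A quick numeric check of the dynamic-NTK rescaling above (illustrative numbers, not tied to any particular checkpoint): once the sequence grows past `max_position_embeddings`, the base is inflated, which stretches the RoPE wavelengths.

base, dim, scaling_factor = 10000.0, 128, 2.0
max_position_embeddings, seq_len = 4096, 8192
new_base = base * (
    (scaling_factor * seq_len / max_position_embeddings) - (scaling_factor - 1)
) ** (dim / (dim - 2))
print(round(new_base))  # ~30528, versus the original base of 10000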
+
176
+
177
+ def rotate_half(x):
178
+ """Rotates half the hidden dims of the input."""
179
+ x1 = x[..., : x.shape[-1] // 2]
180
+ x2 = x[..., x.shape[-1] // 2 :]
181
+ return torch.cat((-x2, x1), dim=-1)
182
+
183
+
184
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
185
+ """Applies Rotary Position Embedding to the query and key tensors.
186
+
187
+ Args:
188
+ q (`torch.Tensor`): The query tensor.
189
+ k (`torch.Tensor`): The key tensor.
190
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
191
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
192
+ position_ids (`torch.Tensor`, *optional*):
193
+ Deprecated and unused.
194
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
195
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
196
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
197
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
198
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
199
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
200
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
201
+ Returns:
202
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
203
+ """
204
+ cos = cos.unsqueeze(unsqueeze_dim)
205
+ sin = sin.unsqueeze(unsqueeze_dim)
206
+ q_embed = (q * cos) + (rotate_half(q) * sin)
207
+ k_embed = (k * cos) + (rotate_half(k) * sin)
208
+ return q_embed, k_embed
209
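A shape-only sanity check of the broadcasting convention described in the docstring, assuming the `apply_rotary_pos_emb` helper defined just above:

import torch

q = torch.randn(2, 4, 6, 8)            # (batch, heads, seq_len, head_dim)
k = torch.randn(2, 4, 6, 8)
cos = torch.randn(2, 6, 8)             # (batch, seq_len, head_dim), as returned by the RoPE module
sin = torch.randn(2, 6, 8)
q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)   # unsqueeze_dim=1 broadcasts over heads
print(q_embed.shape, k_embed.shape)    # torch.Size([2, 4, 6, 8]) twice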
+
210
+
211
+ class LlamaMLP(nn.Module):
212
+ def __init__(self, config):
213
+ super().__init__()
214
+ self.config = config
215
+ self.hidden_size = config.hidden_size
216
+ self.intermediate_size = config.intermediate_size
217
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
218
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
219
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
220
+ self.act_fn = ACT2FN[config.hidden_act]
221
+
222
+ def forward(self, x):
223
+ if self.config.pretraining_tp > 1:
224
+ slice = self.intermediate_size // self.config.pretraining_tp
225
+ gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
226
+ up_proj_slices = self.up_proj.weight.split(slice, dim=0)
227
+ down_proj_slices = self.down_proj.weight.split(slice, dim=1)
228
+
229
+ gate_proj = torch.cat(
230
+ [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
231
+ )
232
+ up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
233
+
234
+ intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
235
+ down_proj = [
236
+ F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
237
+ ]
238
+ down_proj = sum(down_proj)
239
+ else:
240
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
241
+
242
+ return down_proj
243
+
244
+
245
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
246
+ """
247
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
248
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
249
+ """
250
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
251
+ if n_rep == 1:
252
+ return hidden_states
253
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
254
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
255
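The docstring states the equivalence with `torch.repeat_interleave`; a small check of that claim, using the `repeat_kv` helper defined above:

import torch

x = torch.randn(2, 4, 5, 8)                        # (batch, num_key_value_heads, seq_len, head_dim)
a = repeat_kv(x, n_rep=3)
b = torch.repeat_interleave(x, repeats=3, dim=1)
print(a.shape, torch.equal(a, b))                  # torch.Size([2, 12, 5, 8]) True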
+
256
+
257
+ class LlamaAttention(nn.Module):
258
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
259
+
260
+ def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
261
+ super().__init__()
262
+ self.config = config
263
+ self.layer_idx = layer_idx
264
+ if layer_idx is None:
265
+ logger.warning_once(
266
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
267
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
268
+ "when creating this class."
269
+ )
270
+
271
+ self.attention_dropout = config.attention_dropout
272
+ self.hidden_size = config.hidden_size
273
+ self.num_heads = config.num_attention_heads
274
+ self.head_dim = self.hidden_size // self.num_heads
275
+ self.num_key_value_heads = config.num_key_value_heads
276
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
277
+ self.max_position_embeddings = config.max_position_embeddings
278
+ self.rope_theta = config.rope_theta
279
+ self.is_causal = True
280
+
281
+ if (self.head_dim * self.num_heads) != self.hidden_size:
282
+ raise ValueError(
283
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
284
+ f" and `num_heads`: {self.num_heads})."
285
+ )
286
+
287
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
288
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
289
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
290
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
291
+ self._init_rope()
292
+
293
+ def _init_rope(self):
294
+ if self.config.rope_scaling is None:
295
+ self.rotary_emb = LlamaRotaryEmbedding(
296
+ self.head_dim,
297
+ max_position_embeddings=self.max_position_embeddings,
298
+ base=self.rope_theta,
299
+ )
300
+ else:
301
+ scaling_type = self.config.rope_scaling["type"]
302
+ scaling_factor = self.config.rope_scaling["factor"]
303
+ if scaling_type == "linear":
304
+ self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
305
+ self.head_dim,
306
+ max_position_embeddings=self.max_position_embeddings,
307
+ scaling_factor=scaling_factor,
308
+ base=self.rope_theta,
309
+ )
310
+ elif scaling_type == "dynamic":
311
+ self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
312
+ self.head_dim,
313
+ max_position_embeddings=self.max_position_embeddings,
314
+ scaling_factor=scaling_factor,
315
+ base=self.rope_theta,
316
+ )
317
+ else:
318
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
319
+
320
+ def forward(
321
+ self,
322
+ hidden_states: torch.Tensor,
323
+ attention_mask: Optional[torch.Tensor] = None,
324
+ position_ids: Optional[torch.LongTensor] = None,
325
+ past_key_value: Optional[Cache] = None,
326
+ output_attentions: bool = False,
327
+ use_cache: bool = False,
328
+ cache_position: Optional[torch.LongTensor] = None,
329
+ **kwargs,
330
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
331
+ bsz, q_len, _ = hidden_states.size()
332
+
333
+ if self.config.pretraining_tp > 1:
334
+ key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
335
+ query_slices = self.q_proj.weight.split(
336
+ (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
337
+ )
338
+ key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
339
+ value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
340
+
341
+ query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
342
+ query_states = torch.cat(query_states, dim=-1)
343
+
344
+ key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
345
+ key_states = torch.cat(key_states, dim=-1)
346
+
347
+ value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
348
+ value_states = torch.cat(value_states, dim=-1)
349
+
350
+ else:
351
+ query_states = self.q_proj(hidden_states)
352
+ key_states = self.k_proj(hidden_states)
353
+ value_states = self.v_proj(hidden_states)
354
+
355
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
356
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
357
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
358
+
359
+ past_key_value = getattr(self, "past_key_value", past_key_value)
360
+ cos, sin = self.rotary_emb(value_states, position_ids)
361
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
362
+
363
+ if past_key_value is not None:
364
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
365
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
366
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
367
+
368
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
369
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
370
+
371
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
372
+
373
+ if attention_mask is not None: # no matter the length, we just slice it
374
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
375
+ attn_weights = attn_weights + causal_mask
376
+
377
+ # upcast attention to fp32
378
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
379
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
380
+ attn_output = torch.matmul(attn_weights, value_states)
381
+
382
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
383
+ raise ValueError(
384
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
385
+ f" {attn_output.size()}"
386
+ )
387
+
388
+ attn_output = attn_output.transpose(1, 2).contiguous()
389
+
390
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
391
+
392
+ if self.config.pretraining_tp > 1:
393
+ attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
394
+ o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
395
+ attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
396
+ else:
397
+ attn_output = self.o_proj(attn_output)
398
+
399
+ if not output_attentions:
400
+ attn_weights = None
401
+
402
+ return attn_output, attn_weights, past_key_value
403
+
404
+
405
+ class LlamaFlashAttention2(LlamaAttention):
406
+ """
407
+ Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stay
408
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
409
+ flash attention and deal with padding tokens in case the input contains any of them.
410
+ """
411
+
412
+ def __init__(self, *args, **kwargs):
413
+ super().__init__(*args, **kwargs)
414
+
415
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
416
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
417
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
418
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
419
+
420
+ def forward(
421
+ self,
422
+ hidden_states: torch.Tensor,
423
+ attention_mask: Optional[torch.LongTensor] = None,
424
+ position_ids: Optional[torch.LongTensor] = None,
425
+ past_key_value: Optional[Cache] = None,
426
+ output_attentions: bool = False,
427
+ use_cache: bool = False,
428
+ cache_position: Optional[torch.LongTensor] = None,
429
+ **kwargs,
430
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
431
+ output_attentions = False
432
+
433
+ bsz, q_len, _ = hidden_states.size()
434
+
435
+ query_states = self.q_proj(hidden_states)
436
+ key_states = self.k_proj(hidden_states)
437
+ value_states = self.v_proj(hidden_states)
438
+
439
+ # Flash attention requires the input to have the shape
440
+ # batch_size x seq_length x num_heads x head_dim
441
+ # therefore we just need to keep the original shape
442
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
443
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
444
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
445
+
446
+ cos, sin = self.rotary_emb(value_states, position_ids)
447
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
448
+
449
+ past_key_value = getattr(self, "past_key_value", past_key_value)
450
+
451
+ if past_key_value is not None:
452
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
453
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
454
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
455
+
456
+ # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
457
+ # to be able to avoid many of these transpose/reshape/view.
458
+ query_states = query_states.transpose(1, 2)
459
+ key_states = key_states.transpose(1, 2)
460
+ value_states = value_states.transpose(1, 2)
461
+
462
+ dropout_rate = self.attention_dropout if self.training else 0.0
463
+
464
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
465
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
466
+ # cast them back to the correct dtype just to be sure everything works as expected.
467
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
468
+ # in fp32. (LlamaRMSNorm handles it correctly)
469
+
470
+ input_dtype = query_states.dtype
471
+ if input_dtype == torch.float32:
472
+ if torch.is_autocast_enabled():
473
+ target_dtype = torch.get_autocast_gpu_dtype()
474
+ # Handle the case where the model is quantized
475
+ elif hasattr(self.config, "_pre_quantization_dtype"):
476
+ target_dtype = self.config._pre_quantization_dtype
477
+ else:
478
+ target_dtype = self.q_proj.weight.dtype
479
+
480
+ logger.warning_once(
481
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
482
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
483
+ f" {target_dtype}."
484
+ )
485
+
486
+ query_states = query_states.to(target_dtype)
487
+ key_states = key_states.to(target_dtype)
488
+ value_states = value_states.to(target_dtype)
489
+
490
+ attn_output = self._flash_attention_forward(
491
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
492
+ )
493
+
494
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
495
+ attn_output = self.o_proj(attn_output)
496
+
497
+ if not output_attentions:
498
+ attn_weights = None
499
+
500
+ return attn_output, attn_weights, past_key_value
501
+
502
+ def _flash_attention_forward(
503
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
504
+ ):
505
+ """
506
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
507
+ we first unpad the input, then compute the attention scores and pad the final attention scores back.
508
+
509
+ Args:
510
+ query_states (`torch.Tensor`):
511
+ Input query states to be passed to Flash Attention API
512
+ key_states (`torch.Tensor`):
513
+ Input key states to be passed to Flash Attention API
514
+ value_states (`torch.Tensor`):
515
+ Input value states to be passed to Flash Attention API
516
+ attention_mask (`torch.Tensor`):
517
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
518
+ position of padding tokens and 1 for the position of non-padding tokens.
519
+ dropout (`float`):
520
+ Attention dropout
521
+ softmax_scale (`float`, *optional*):
522
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
523
+ """
524
+ if not self._flash_attn_uses_top_left_mask:
525
+ causal = self.is_causal
526
+ else:
527
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
528
+ causal = self.is_causal and query_length != 1
529
+
530
+ # Contains at least one padding token in the sequence
531
+ if attention_mask is not None:
532
+ batch_size = query_states.shape[0]
533
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
534
+ query_states, key_states, value_states, attention_mask, query_length
535
+ )
536
+
537
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
538
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
539
+
540
+ attn_output_unpad = flash_attn_varlen_func(
541
+ query_states,
542
+ key_states,
543
+ value_states,
544
+ cu_seqlens_q=cu_seqlens_q,
545
+ cu_seqlens_k=cu_seqlens_k,
546
+ max_seqlen_q=max_seqlen_in_batch_q,
547
+ max_seqlen_k=max_seqlen_in_batch_k,
548
+ dropout_p=dropout,
549
+ softmax_scale=softmax_scale,
550
+ causal=causal,
551
+ )
552
+
553
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
554
+ else:
555
+ attn_output = flash_attn_func(
556
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
557
+ )
558
+
559
+ return attn_output
560
+
561
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
562
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
563
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
564
+
565
+ key_layer = index_first_axis(
566
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
567
+ )
568
+ value_layer = index_first_axis(
569
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
570
+ )
571
+ if query_length == kv_seq_len:
572
+ query_layer = index_first_axis(
573
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
574
+ )
575
+ cu_seqlens_q = cu_seqlens_k
576
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
577
+ indices_q = indices_k
578
+ elif query_length == 1:
579
+ max_seqlen_in_batch_q = 1
580
+ cu_seqlens_q = torch.arange(
581
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
582
+ ) # There is a memcpy here, that is very bad.
583
+ indices_q = cu_seqlens_q[:-1]
584
+ query_layer = query_layer.squeeze(1)
585
+ else:
586
+ # The -q_len: slice assumes left padding.
587
+ attention_mask = attention_mask[:, -query_length:]
588
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
589
+
590
+ return (
591
+ query_layer,
592
+ key_layer,
593
+ value_layer,
594
+ indices_q,
595
+ (cu_seqlens_q, cu_seqlens_k),
596
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
597
+ )
598
+
599
+
600
+ class LlamaSdpaAttention(LlamaAttention):
601
+ """
602
+ Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
603
+ `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
604
+ SDPA API.
605
+ """
606
+
607
+ # Adapted from LlamaAttention.forward
608
+ def forward(
609
+ self,
610
+ hidden_states: torch.Tensor,
611
+ attention_mask: Optional[torch.Tensor] = None,
612
+ position_ids: Optional[torch.LongTensor] = None,
613
+ past_key_value: Optional[Cache] = None,
614
+ output_attentions: bool = False,
615
+ use_cache: bool = False,
616
+ cache_position: Optional[torch.LongTensor] = None,
617
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
618
+ if output_attentions:
619
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
620
+ logger.warning_once(
621
+ "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
622
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
623
+ )
624
+ return super().forward(
625
+ hidden_states=hidden_states,
626
+ attention_mask=attention_mask,
627
+ position_ids=position_ids,
628
+ past_key_value=past_key_value,
629
+ output_attentions=output_attentions,
630
+ use_cache=use_cache,
631
+ cache_position=cache_position,
632
+ )
633
+
634
+ bsz, q_len, _ = hidden_states.size()
635
+
636
+ query_states = self.q_proj(hidden_states)
637
+ key_states = self.k_proj(hidden_states)
638
+ value_states = self.v_proj(hidden_states)
639
+
640
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
641
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
642
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
643
+
644
+ cos, sin = self.rotary_emb(value_states, position_ids)
645
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
646
+
647
+ # In case static cache is used, it is an instance attribute.
648
+ past_key_value = getattr(self, "past_key_value", past_key_value)
649
+
650
+ if past_key_value is not None:
651
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
652
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
653
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
654
+
655
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
656
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
657
+
658
+ causal_mask = attention_mask
659
+ if attention_mask is not None:
660
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
661
+
662
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
663
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
664
+ if query_states.device.type == "cuda" and causal_mask is not None:
665
+ query_states = query_states.contiguous()
666
+ key_states = key_states.contiguous()
667
+ value_states = value_states.contiguous()
668
+
669
+ # In case we are not compiling, we may set `causal_mask` to None, which is required to dispatch to SDPA's Flash Attention 2 backend, rather
670
+ # than relying on the `is_causal` argument.
671
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
672
+ query_states,
673
+ key_states,
674
+ value_states,
675
+ attn_mask=causal_mask,
676
+ dropout_p=self.attention_dropout if self.training else 0.0,
677
+ is_causal=causal_mask is None and q_len > 1,
678
+ )
679
+
680
+ attn_output = attn_output.transpose(1, 2).contiguous()
681
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
682
+
683
+ attn_output = self.o_proj(attn_output)
684
+
685
+ return attn_output, None, past_key_value
686
+
687
+
688
+ LLAMA_ATTENTION_CLASSES = {
689
+ "eager": LlamaAttention,
690
+ "flash_attention_2": LlamaFlashAttention2,
691
+ "sdpa": LlamaSdpaAttention,
692
+ }
693
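The mapping above is what `config._attn_implementation` indexes into when the decoder layers are built. A hedged loading sketch (the model id is illustrative; the `attn_implementation` argument is how one of the three classes is selected at load time):

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",            # illustrative checkpoint
    attn_implementation="sdpa",            # or "eager" / "flash_attention_2"
)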
+
694
+
695
+ class LlamaDecoderLayer(nn.Module):
696
+ def __init__(self, config: LlamaConfig, layer_idx: int):
697
+ super().__init__()
698
+ self.hidden_size = config.hidden_size
699
+
700
+ self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
701
+
702
+ self.mlp = LlamaMLP(config)
703
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
704
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
705
+
706
+ def forward(
707
+ self,
708
+ hidden_states: torch.Tensor,
709
+ attention_mask: Optional[torch.Tensor] = None,
710
+ position_ids: Optional[torch.LongTensor] = None,
711
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
712
+ output_attentions: Optional[bool] = False,
713
+ use_cache: Optional[bool] = False,
714
+ cache_position: Optional[torch.LongTensor] = None,
715
+ **kwargs,
716
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
717
+ """
718
+ Args:
719
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
720
+ attention_mask (`torch.FloatTensor`, *optional*):
721
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
722
+ query_sequence_length, key_sequence_length)` if default attention is used.
723
+ output_attentions (`bool`, *optional*):
724
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
725
+ returned tensors for more detail.
726
+ use_cache (`bool`, *optional*):
727
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
728
+ (see `past_key_values`).
729
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
730
+ """
731
+ if "padding_mask" in kwargs:
732
+ warnings.warn(
733
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
734
+ )
735
+
736
+ residual = hidden_states
737
+
738
+ hidden_states = self.input_layernorm(hidden_states)
739
+
740
+ # Self Attention
741
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
742
+ hidden_states=hidden_states,
743
+ attention_mask=attention_mask,
744
+ position_ids=position_ids,
745
+ past_key_value=past_key_value,
746
+ output_attentions=output_attentions,
747
+ use_cache=use_cache,
748
+ cache_position=cache_position,
749
+ **kwargs,
750
+ )
751
+ hidden_states = residual + hidden_states
752
+
753
+ # Fully Connected
754
+ residual = hidden_states
755
+ hidden_states = self.post_attention_layernorm(hidden_states)
756
+ hidden_states = self.mlp(hidden_states)
757
+ hidden_states = residual + hidden_states
758
+
759
+ outputs = (hidden_states,)
760
+
761
+ if output_attentions:
762
+ outputs += (self_attn_weights,)
763
+
764
+ if use_cache:
765
+ outputs += (present_key_value,)
766
+
767
+ return outputs
768
+
769
+
770
+ LLAMA_START_DOCSTRING = r"""
771
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
772
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
773
+ etc.)
774
+
775
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
776
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
777
+ and behavior.
778
+
779
+ Parameters:
780
+ config ([`LlamaConfig`]):
781
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
782
+ load the weights associated with the model, only the configuration. Check out the
783
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
784
+ """
785
+
786
+
787
+ @add_start_docstrings(
788
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
789
+ LLAMA_START_DOCSTRING,
790
+ )
791
+ class LlamaPreTrainedModel(PreTrainedModel):
792
+ config_class = LlamaConfig
793
+ base_model_prefix = "model"
794
+ supports_gradient_checkpointing = True
795
+ _no_split_modules = ["LlamaDecoderLayer"]
796
+ _skip_keys_device_placement = ["past_key_values"]
797
+ _supports_flash_attn_2 = True
798
+ _supports_sdpa = True
799
+ _supports_cache_class = True
800
+
801
+ def _init_weights(self, module):
802
+ std = self.config.initializer_range
803
+ if isinstance(module, nn.Linear):
804
+ module.weight.data.normal_(mean=0.0, std=std)
805
+ if module.bias is not None:
806
+ module.bias.data.zero_()
807
+ elif isinstance(module, nn.Embedding):
808
+ module.weight.data.normal_(mean=0.0, std=std)
809
+ if module.padding_idx is not None:
810
+ module.weight.data[module.padding_idx].zero_()
811
+
812
+ def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
813
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
814
+ raise ValueError(
815
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
816
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
817
+ )
818
+
819
+ for layer in self.model.layers:
820
+ device = layer.input_layernorm.weight.device
821
+ if hasattr(self.config, "_pre_quantization_dtype"):
822
+ dtype = self.config._pre_quantization_dtype
823
+ else:
824
+ dtype = layer.self_attn.o_proj.weight.dtype
825
+ layer.self_attn.past_key_value = cache_cls(
826
+ self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
827
+ )
828
+
829
+ def _reset_cache(self):
830
+ for layer in self.model.layers:
831
+ layer.self_attn.past_key_value = None
832
+
833
+
834
+ LLAMA_INPUTS_DOCSTRING = r"""
835
+ Args:
836
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
837
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
838
+ it.
839
+
840
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
841
+ [`PreTrainedTokenizer.__call__`] for details.
842
+
843
+ [What are input IDs?](../glossary#input-ids)
844
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
845
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
846
+
847
+ - 1 for tokens that are **not masked**,
848
+ - 0 for tokens that are **masked**.
849
+
850
+ [What are attention masks?](../glossary#attention-mask)
851
+
852
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
853
+ [`PreTrainedTokenizer.__call__`] for details.
854
+
855
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
856
+ `past_key_values`).
857
+
858
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
859
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
860
+ information on the default strategy.
861
+
862
+ - 1 indicates the head is **not masked**,
863
+ - 0 indicates the head is **masked**.
864
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
865
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
866
+ config.n_positions - 1]`.
867
+
868
+ [What are position IDs?](../glossary#position-ids)
869
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
870
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
871
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
872
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
873
+
874
+ Two formats are allowed:
875
+ - a [`~cache_utils.Cache`] instance;
876
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
877
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
878
+ cache format.
879
+
880
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
881
+ legacy cache format will be returned.
882
+
883
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
884
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
885
+ of shape `(batch_size, sequence_length)`.
886
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
887
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
888
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
889
+ model's internal embedding lookup matrix.
890
+ use_cache (`bool`, *optional*):
891
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
892
+ `past_key_values`).
893
+ output_attentions (`bool`, *optional*):
894
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
895
+ tensors for more detail.
896
+ output_hidden_states (`bool`, *optional*):
897
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
898
+ more detail.
899
+ return_dict (`bool`, *optional*):
900
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
901
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
902
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
903
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
904
+ the complete sequence length.
905
+ """
906
+
907
+
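The `past_key_values` passage above lists the two accepted cache formats. As a quick aside (not part of the uploaded file), here is a minimal sketch converting between them; the tensor shapes are invented for illustration and `DynamicCache` is imported from recent transformers releases:

```python
# Illustrative only: round-trip between the legacy tuple-of-tuples cache and a `Cache` object.
import torch
from transformers import DynamicCache  # exported at the top level in recent releases

num_layers, batch, heads, seq, head_dim = 2, 1, 4, 5, 8  # made-up shapes
legacy_cache = tuple(
    (torch.zeros(batch, heads, seq, head_dim), torch.zeros(batch, heads, seq, head_dim))
    for _ in range(num_layers)
)

cache = DynamicCache.from_legacy_cache(legacy_cache)   # legacy -> Cache
print(cache.get_seq_length())                          # 5
round_tripped = cache.to_legacy_cache()                # Cache -> legacy
print(len(round_tripped), round_tripped[0][0].shape)   # 2 torch.Size([1, 4, 5, 8])
```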
908
+ @add_start_docstrings(
909
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
910
+ LLAMA_START_DOCSTRING,
911
+ )
912
+ class LlamaModel(LlamaPreTrainedModel):
913
+ """
914
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
915
+
916
+ Args:
917
+ config: LlamaConfig
918
+ """
919
+
920
+ def __init__(self, config: LlamaConfig):
921
+ super().__init__(config)
922
+ self.padding_idx = config.pad_token_id
923
+ self.vocab_size = config.vocab_size
924
+
925
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
926
+ self.layers = nn.ModuleList(
927
+ [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
928
+ )
929
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
930
+ self.gradient_checkpointing = False
931
+
932
+ # Initialize weights and apply final processing
933
+ self.post_init()
934
+
935
+ def get_input_embeddings(self):
936
+ return self.embed_tokens
937
+
938
+ def set_input_embeddings(self, value):
939
+ self.embed_tokens = value
940
+
941
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
942
+ def forward(
943
+ self,
944
+ input_ids: torch.LongTensor = None,
945
+ attention_mask: Optional[torch.Tensor] = None,
946
+ position_ids: Optional[torch.LongTensor] = None,
947
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
948
+ inputs_embeds: Optional[torch.FloatTensor] = None,
949
+ use_cache: Optional[bool] = None,
950
+ output_attentions: Optional[bool] = None,
951
+ output_hidden_states: Optional[bool] = None,
952
+ return_dict: Optional[bool] = None,
953
+ cache_position: Optional[torch.LongTensor] = None,
954
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
955
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
956
+ output_hidden_states = (
957
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
958
+ )
959
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
960
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
961
+
962
+ if (input_ids is None) ^ (inputs_embeds is not None):
963
+ raise ValueError(
964
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
965
+ )
966
+
967
+ if self.gradient_checkpointing and self.training and use_cache:
968
+ logger.warning_once(
969
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
970
+ )
971
+ use_cache = False
972
+
973
+ if inputs_embeds is None:
974
+ inputs_embeds = self.embed_tokens(input_ids)
975
+
976
+ past_seen_tokens = 0
977
+ if use_cache: # kept for BC (cache positions)
978
+ if not isinstance(past_key_values, StaticCache):
979
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
980
+ past_seen_tokens = past_key_values.get_seq_length()
981
+
982
+ if cache_position is None:
983
+ if isinstance(past_key_values, StaticCache):
984
+ raise ValueError("cache_position is a required argument when using StaticCache.")
985
+ cache_position = torch.arange(
986
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
987
+ )
988
+
989
+ if position_ids is None:
990
+ position_ids = cache_position.unsqueeze(0)
991
+
992
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_seen_tokens)
993
+
994
+ # embed positions
995
+ hidden_states = inputs_embeds
996
+
997
+ # decoder layers
998
+ all_hidden_states = () if output_hidden_states else None
999
+ all_self_attns = () if output_attentions else None
1000
+ next_decoder_cache = None
1001
+
1002
+ for decoder_layer in self.layers:
1003
+ if output_hidden_states:
1004
+ all_hidden_states += (hidden_states,)
1005
+
1006
+ if self.gradient_checkpointing and self.training:
1007
+ layer_outputs = self._gradient_checkpointing_func(
1008
+ decoder_layer.__call__,
1009
+ hidden_states,
1010
+ causal_mask,
1011
+ position_ids,
1012
+ past_key_values,
1013
+ output_attentions,
1014
+ use_cache,
1015
+ cache_position,
1016
+ )
1017
+ else:
1018
+ layer_outputs = decoder_layer(
1019
+ hidden_states,
1020
+ attention_mask=causal_mask,
1021
+ position_ids=position_ids,
1022
+ past_key_value=past_key_values,
1023
+ output_attentions=output_attentions,
1024
+ use_cache=use_cache,
1025
+ cache_position=cache_position,
1026
+ )
1027
+
1028
+ hidden_states = layer_outputs[0]
1029
+
1030
+ if use_cache:
1031
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1032
+
1033
+ if output_attentions:
1034
+ all_self_attns += (layer_outputs[1],)
1035
+
1036
+ hidden_states = self.norm(hidden_states)
1037
+
1038
+ # add hidden states from the last decoder layer
1039
+ if output_hidden_states:
1040
+ all_hidden_states += (hidden_states,)
1041
+
1042
+ next_cache = None
1043
+ if use_cache:
1044
+ next_cache = (
1045
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
1046
+ )
1047
+ if not return_dict:
1048
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1049
+ return BaseModelOutputWithPast(
1050
+ last_hidden_state=hidden_states,
1051
+ past_key_values=next_cache,
1052
+ hidden_states=all_hidden_states,
1053
+ attentions=all_self_attns,
1054
+ )
1055
+
1056
+ def _update_causal_mask(
1057
+ self,
1058
+ attention_mask: torch.Tensor,
1059
+ input_tensor: torch.Tensor,
1060
+ cache_position: torch.Tensor,
1061
+ past_seen_tokens: int,
1062
+ ):
1063
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
1064
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
1065
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1066
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1067
+
1068
+ if self.config._attn_implementation == "flash_attention_2":
1069
+ if attention_mask is not None and 0.0 in attention_mask:
1070
+ return attention_mask
1071
+ return None
1072
+
1073
+ if self.config._attn_implementation == "sdpa":
1074
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument,
1075
+ # in order to dispatch on Flash Attention 2.
1076
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1077
+ attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens
1078
+ ):
1079
+ return None
1080
+
1081
+ dtype, device = input_tensor.dtype, input_tensor.device
1082
+ min_dtype = torch.finfo(dtype).min
1083
+ sequence_length = input_tensor.shape[1]
1084
+ if hasattr(getattr(self.layers[0], "self_attn", {}), "past_key_value"): # static cache
1085
+ target_length = self.config.max_position_embeddings
1086
+ else: # dynamic cache
1087
+ target_length = (
1088
+ attention_mask.shape[-1]
1089
+ if isinstance(attention_mask, torch.Tensor)
1090
+ else past_seen_tokens + sequence_length + 1
1091
+ )
1092
+
1093
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1094
+ if sequence_length != 1:
1095
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1096
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1097
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1098
+ if attention_mask is not None:
1099
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1100
+ if attention_mask.dim() == 2:
1101
+ mask_length = attention_mask.shape[-1]
1102
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1103
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1104
+ elif attention_mask.dim() == 4:
1105
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
1106
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
1107
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
1108
+ offset = cache_position[0]
1109
+ else:
1110
+ offset = 0
1111
+ mask_shape = attention_mask.shape
1112
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
1113
+ causal_mask[
1114
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
1115
+ ] = mask_slice
1116
+
1117
+ if (
1118
+ self.config._attn_implementation == "sdpa"
1119
+ and attention_mask is not None
1120
+ and attention_mask.device.type == "cuda"
1121
+ ):
1122
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1123
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1124
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1125
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1126
+
1127
+ return causal_mask
1128
+
1129
+
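The arithmetic inside `_update_causal_mask` is easier to see in isolation. Below is a standalone sketch (not part of the uploaded file; the lengths are made up) of the core construction, where blocked positions hold `min_dtype` and allowed positions hold 0.0:

```python
# Illustrative only: build the (sequence_length, target_length) causal mask as above.
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
sequence_length, target_length = 3, 5                  # 3 new tokens, 5 cache slots
cache_position = torch.arange(2, 2 + sequence_length)  # the new tokens sit at positions 2..4

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)                            # block strictly-future slots
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)   # unblock cached positions
print(causal_mask)
# Row i lets the i-th new token attend to every position <= cache_position[i] and blocks the rest.
```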
1130
+ class LlamaForCausalLM(LlamaPreTrainedModel):
1131
+ _tied_weights_keys = ["lm_head.weight"]
1132
+
1133
+ def __init__(self, config):
1134
+ super().__init__(config)
1135
+ self.model = LlamaModel(config)
1136
+ self.vocab_size = config.vocab_size
1137
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1138
+
1139
+ # Initialize weights and apply final processing
1140
+ self.post_init()
1141
+
1142
+ def get_input_embeddings(self):
1143
+ return self.model.embed_tokens
1144
+
1145
+ def set_input_embeddings(self, value):
1146
+ self.model.embed_tokens = value
1147
+
1148
+ def get_output_embeddings(self):
1149
+ return self.lm_head
1150
+
1151
+ def set_output_embeddings(self, new_embeddings):
1152
+ self.lm_head = new_embeddings
1153
+
1154
+ def set_decoder(self, decoder):
1155
+ self.model = decoder
1156
+
1157
+ def get_decoder(self):
1158
+ return self.model
1159
+
1160
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1161
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1162
+ def forward(
1163
+ self,
1164
+ input_ids: torch.LongTensor = None,
1165
+ attention_mask: Optional[torch.Tensor] = None,
1166
+ position_ids: Optional[torch.LongTensor] = None,
1167
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1168
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1169
+ labels: Optional[torch.LongTensor] = None,
1170
+ use_cache: Optional[bool] = None,
1171
+ output_attentions: Optional[bool] = None,
1172
+ output_hidden_states: Optional[bool] = None,
1173
+ return_dict: Optional[bool] = None,
1174
+ cache_position: Optional[torch.LongTensor] = None,
1175
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1176
+ r"""
1177
+ Args:
1178
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1179
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1180
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1181
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1182
+
1183
+ Returns:
1184
+
1185
+ Example:
1186
+
1187
+ ```python
1188
+ >>> from transformers import AutoTokenizer, LlamaForCausalLM
1189
+
1190
+ >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
1191
+ >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
1192
+
1193
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1194
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1195
+
1196
+ >>> # Generate
1197
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1198
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1199
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1200
+ ```"""
1201
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1202
+ output_hidden_states = (
1203
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1204
+ )
1205
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1206
+
1207
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1208
+ outputs = self.model(
1209
+ input_ids=input_ids,
1210
+ attention_mask=attention_mask,
1211
+ position_ids=position_ids,
1212
+ past_key_values=past_key_values,
1213
+ inputs_embeds=inputs_embeds,
1214
+ use_cache=use_cache,
1215
+ output_attentions=output_attentions,
1216
+ output_hidden_states=output_hidden_states,
1217
+ return_dict=return_dict,
1218
+ cache_position=cache_position,
1219
+ )
1220
+
1221
+ hidden_states = outputs[0]
1222
+ if self.config.pretraining_tp > 1:
1223
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
1224
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
1225
+ logits = torch.cat(logits, dim=-1)
1226
+ else:
1227
+ logits = self.lm_head(hidden_states)
1228
+ logits = logits.float()
1229
+
1230
+ loss = None
1231
+ if labels is not None:
1232
+ # Shift so that tokens < n predict n
1233
+ shift_logits = logits[..., :-1, :].contiguous()
1234
+ shift_labels = labels[..., 1:].contiguous()
1235
+ # Flatten the tokens
1236
+ loss_fct = CrossEntropyLoss()
1237
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1238
+ shift_labels = shift_labels.view(-1)
1239
+ # Enable model parallelism
1240
+ shift_labels = shift_labels.to(shift_logits.device)
1241
+ loss = loss_fct(shift_logits, shift_labels)
1242
+
1243
+ if not return_dict:
1244
+ output = (logits,) + outputs[1:]
1245
+ return (loss,) + output if loss is not None else output
1246
+
1247
+ return CausalLMOutputWithPast(
1248
+ loss=loss,
1249
+ logits=logits,
1250
+ past_key_values=outputs.past_key_values,
1251
+ hidden_states=outputs.hidden_states,
1252
+ attentions=outputs.attentions,
1253
+ )
1254
+
1255
+ def prepare_inputs_for_generation(
1256
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
1257
+ ):
1258
+ # With static cache, the `past_key_values` is None
1259
+ # TODO joao: standardize interface for the different Cache classes and remove of this if
1260
+ has_static_cache = False
1261
+ if past_key_values is None:
1262
+ past_key_values = getattr(getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None)
1263
+ has_static_cache = past_key_values is not None
1264
+
1265
+ past_length = 0
1266
+ if past_key_values is not None:
1267
+ if isinstance(past_key_values, Cache):
1268
+ past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
1269
+ max_cache_length = (
1270
+ torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
1271
+ if past_key_values.get_max_length() is not None
1272
+ else None
1273
+ )
1274
+ cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
1275
+ # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
1276
+ else:
1277
+ cache_length = past_length = past_key_values[0][0].shape[2]
1278
+ max_cache_length = None
1279
+
1280
+ # Keep only the unprocessed tokens:
1281
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1282
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1283
+ # input)
1284
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1285
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1286
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1287
+ # input_ids based on the past_length.
1288
+ elif past_length < input_ids.shape[1]:
1289
+ input_ids = input_ids[:, past_length:]
1290
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1291
+
1292
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1293
+ if (
1294
+ max_cache_length is not None
1295
+ and attention_mask is not None
1296
+ and cache_length + input_ids.shape[1] > max_cache_length
1297
+ ):
1298
+ attention_mask = attention_mask[:, -max_cache_length:]
1299
+
1300
+ position_ids = kwargs.get("position_ids", None)
1301
+ if attention_mask is not None and position_ids is None:
1302
+ # create position_ids on the fly for batch generation
1303
+ position_ids = attention_mask.long().cumsum(-1) - 1
1304
+ position_ids.masked_fill_(attention_mask == 0, 1)
1305
+ if past_key_values:
1306
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1307
+
1308
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1309
+ if inputs_embeds is not None and past_key_values is None:
1310
+ model_inputs = {"inputs_embeds": inputs_embeds}
1311
+ else:
1312
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1313
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
1314
+ # TODO: use `next_tokens` directly instead.
1315
+ model_inputs = {"input_ids": input_ids.contiguous()}
1316
+
1317
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
1318
+ if cache_position is None:
1319
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
1320
+ else:
1321
+ cache_position = cache_position[-input_length:]
1322
+
1323
+ if has_static_cache:
1324
+ past_key_values = None
1325
+
1326
+ model_inputs.update(
1327
+ {
1328
+ "position_ids": position_ids,
1329
+ "cache_position": cache_position,
1330
+ "past_key_values": past_key_values,
1331
+ "use_cache": kwargs.get("use_cache"),
1332
+ "attention_mask": attention_mask,
1333
+ }
1334
+ )
1335
+ return model_inputs
1336
+
1337
+ @staticmethod
1338
+ def _reorder_cache(past_key_values, beam_idx):
1339
+ reordered_past = ()
1340
+ for layer_past in past_key_values:
1341
+ reordered_past += (
1342
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1343
+ )
1344
+ return reordered_past
1345
+
1346
+
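The loss block in `LlamaForCausalLM.forward` shifts logits and labels by one position so that each token is trained to predict its successor. A self-contained sketch of just that step (not part of the uploaded file), with random tensors standing in for real model outputs:

```python
# Illustrative only: the shifted next-token loss used in LlamaForCausalLM.forward.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
logits = torch.randn(2, 6, vocab_size)           # (batch, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (2, 6))    # same shape as input_ids

shift_logits = logits[..., :-1, :].contiguous()  # the last position has no target
shift_labels = labels[..., 1:].contiguous()      # the first token is never predicted

loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss)
```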
1347
+ @add_start_docstrings(
1348
+ """
1349
+ The LLaMa Model transformer with a sequence classification head on top (linear layer).
1350
+
1351
+ [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1352
+ (e.g. GPT-2) do.
1353
+
1354
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1355
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1356
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1357
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1358
+ each row of the batch).
1359
+ """,
1360
+ LLAMA_START_DOCSTRING,
1361
+ )
1362
+ class LlamaForSequenceClassification(LlamaPreTrainedModel):
1363
+ def __init__(self, config):
1364
+ super().__init__(config)
1365
+ self.num_labels = config.num_labels
1366
+ self.model = LlamaModel(config)
1367
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1368
+
1369
+ # Initialize weights and apply final processing
1370
+ self.post_init()
1371
+
1372
+ def get_input_embeddings(self):
1373
+ return self.model.embed_tokens
1374
+
1375
+ def set_input_embeddings(self, value):
1376
+ self.model.embed_tokens = value
1377
+
1378
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1379
+ def forward(
1380
+ self,
1381
+ input_ids: torch.LongTensor = None,
1382
+ attention_mask: Optional[torch.Tensor] = None,
1383
+ position_ids: Optional[torch.LongTensor] = None,
1384
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1385
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1386
+ labels: Optional[torch.LongTensor] = None,
1387
+ use_cache: Optional[bool] = None,
1388
+ output_attentions: Optional[bool] = None,
1389
+ output_hidden_states: Optional[bool] = None,
1390
+ return_dict: Optional[bool] = None,
1391
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1392
+ r"""
1393
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1394
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1395
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1396
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1397
+ """
1398
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1399
+
1400
+ transformer_outputs = self.model(
1401
+ input_ids,
1402
+ attention_mask=attention_mask,
1403
+ position_ids=position_ids,
1404
+ past_key_values=past_key_values,
1405
+ inputs_embeds=inputs_embeds,
1406
+ use_cache=use_cache,
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ )
1411
+ hidden_states = transformer_outputs[0]
1412
+ logits = self.score(hidden_states)
1413
+
1414
+ if input_ids is not None:
1415
+ batch_size = input_ids.shape[0]
1416
+ else:
1417
+ batch_size = inputs_embeds.shape[0]
1418
+
1419
+ if self.config.pad_token_id is None and batch_size != 1:
1420
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1421
+ if self.config.pad_token_id is None:
1422
+ sequence_lengths = -1
1423
+ else:
1424
+ if input_ids is not None:
1425
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1426
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1427
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1428
+ sequence_lengths = sequence_lengths.to(logits.device)
1429
+ else:
1430
+ sequence_lengths = -1
1431
+
1432
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1433
+
1434
+ loss = None
1435
+ if labels is not None:
1436
+ labels = labels.to(logits.device)
1437
+ if self.config.problem_type is None:
1438
+ if self.num_labels == 1:
1439
+ self.config.problem_type = "regression"
1440
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1441
+ self.config.problem_type = "single_label_classification"
1442
+ else:
1443
+ self.config.problem_type = "multi_label_classification"
1444
+
1445
+ if self.config.problem_type == "regression":
1446
+ loss_fct = MSELoss()
1447
+ if self.num_labels == 1:
1448
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1449
+ else:
1450
+ loss = loss_fct(pooled_logits, labels)
1451
+ elif self.config.problem_type == "single_label_classification":
1452
+ loss_fct = CrossEntropyLoss()
1453
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1454
+ elif self.config.problem_type == "multi_label_classification":
1455
+ loss_fct = BCEWithLogitsLoss()
1456
+ loss = loss_fct(pooled_logits, labels)
1457
+ if not return_dict:
1458
+ output = (pooled_logits,) + transformer_outputs[1:]
1459
+ return ((loss,) + output) if loss is not None else output
1460
+
1461
+ return SequenceClassifierOutputWithPast(
1462
+ loss=loss,
1463
+ logits=pooled_logits,
1464
+ past_key_values=transformer_outputs.past_key_values,
1465
+ hidden_states=transformer_outputs.hidden_states,
1466
+ attentions=transformer_outputs.attentions,
1467
+ )
1468
+
1469
+
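The pooling logic above finds the last non-padding token of each row with an `argmax`-plus-modulo trick instead of reverse indexing, which keeps the graph ONNX-exportable. A tiny standalone sketch (not part of the uploaded file) with a hypothetical `pad_token_id` of 0:

```python
# Illustrative only: index of the last real (non-padding) token per row.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],     # padded row
                          [8, 9, 3, 4, 2]])    # no padding at all

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]  # maps -1 (no pad found) to the last index
print(sequence_lengths)  # tensor([2, 4])
```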
1470
+ @add_start_docstrings(
1471
+ """
1472
+ The Llama Model transformer with a span classification head on top for extractive question-answering tasks like
1473
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1474
+ """,
1475
+ LLAMA_START_DOCSTRING,
1476
+ )
1477
+ class LlamaForQuestionAnswering(LlamaPreTrainedModel):
1478
+ base_model_prefix = "transformer"
1479
+
1480
+ # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama
1481
+ def __init__(self, config):
1482
+ super().__init__(config)
1483
+ self.transformer = LlamaModel(config)
1484
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1485
+
1486
+ # Initialize weights and apply final processing
1487
+ self.post_init()
1488
+
1489
+ def get_input_embeddings(self):
1490
+ return self.transformer.embed_tokens
1491
+
1492
+ def set_input_embeddings(self, value):
1493
+ self.transformer.embed_tokens = value
1494
+
1495
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1496
+ def forward(
1497
+ self,
1498
+ input_ids: Optional[torch.LongTensor] = None,
1499
+ attention_mask: Optional[torch.FloatTensor] = None,
1500
+ position_ids: Optional[torch.LongTensor] = None,
1501
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1502
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1503
+ start_positions: Optional[torch.LongTensor] = None,
1504
+ end_positions: Optional[torch.LongTensor] = None,
1505
+ output_attentions: Optional[bool] = None,
1506
+ output_hidden_states: Optional[bool] = None,
1507
+ return_dict: Optional[bool] = None,
1508
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1509
+ r"""
1510
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1511
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1512
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1513
+ are not taken into account for computing the loss.
1514
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1515
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1516
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1517
+ are not taken into account for computing the loss.
1518
+ """
1519
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1520
+
1521
+ outputs = self.transformer(
1522
+ input_ids,
1523
+ attention_mask=attention_mask,
1524
+ position_ids=position_ids,
1525
+ past_key_values=past_key_values,
1526
+ inputs_embeds=inputs_embeds,
1527
+ output_attentions=output_attentions,
1528
+ output_hidden_states=output_hidden_states,
1529
+ return_dict=return_dict,
1530
+ )
1531
+
1532
+ sequence_output = outputs[0]
1533
+
1534
+ logits = self.qa_outputs(sequence_output)
1535
+ start_logits, end_logits = logits.split(1, dim=-1)
1536
+ start_logits = start_logits.squeeze(-1).contiguous()
1537
+ end_logits = end_logits.squeeze(-1).contiguous()
1538
+
1539
+ total_loss = None
1540
+ if start_positions is not None and end_positions is not None:
1541
+ # If we are on multi-GPU, split add a dimension
1542
+ if len(start_positions.size()) > 1:
1543
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1544
+ if len(end_positions.size()) > 1:
1545
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1546
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1547
+ ignored_index = start_logits.size(1)
1548
+ start_positions = start_positions.clamp(0, ignored_index)
1549
+ end_positions = end_positions.clamp(0, ignored_index)
1550
+
1551
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1552
+ start_loss = loss_fct(start_logits, start_positions)
1553
+ end_loss = loss_fct(end_logits, end_positions)
1554
+ total_loss = (start_loss + end_loss) / 2
1555
+
1556
+ if not return_dict:
1557
+ output = (start_logits, end_logits) + outputs[2:]
1558
+ return ((total_loss,) + output) if total_loss is not None else output
1559
+
1560
+ return QuestionAnsweringModelOutput(
1561
+ loss=total_loss,
1562
+ start_logits=start_logits,
1563
+ end_logits=end_logits,
1564
+ hidden_states=outputs.hidden_states,
1565
+ attentions=outputs.attentions,
1566
+ )
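End of `modeling_llama.py`. For orientation only (this sketch is not part of the uploaded file), loading the model with a specific attention backend picks the corresponding entry of `LLAMA_ATTENTION_CLASSES` defined above; it assumes the gated checkpoint referenced in the docstrings is accessible and that enough memory is available:

```python
# Illustrative only: `attn_implementation` selects "eager", "sdpa" or "flash_attention_2".
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",
    torch_dtype=torch.float16,
    attn_implementation="sdpa",  # "flash_attention_2" additionally requires the flash-attn package
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

inputs = tokenizer("Hey, are you conscious?", return_tensors="pt")
with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```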
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py ADDED
@@ -0,0 +1,471 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ """Tokenization classes for LLaMA."""
22
+ import os
23
+ from shutil import copyfile
24
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
25
+
26
+ import sentencepiece as spm
27
+
28
+ from ...convert_slow_tokenizer import import_protobuf
29
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
30
+ from ...utils import logging
31
+
32
+
33
+ if TYPE_CHECKING:
34
+ from ...tokenization_utils_base import TextInput
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
39
+
40
+ SPIECE_UNDERLINE = "▁"
41
+
42
+ B_INST, E_INST = "[INST]", "[/INST]"
43
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
44
+
45
+ # fmt: off
46
+ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
47
+ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
48
+ that your responses are socially unbiased and positive in nature.
49
+
50
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
51
+ correct. If you don't know the answer to a question, please don't share false information."""
52
+ # fmt: on
53
+
54
+
55
+ class LlamaTokenizer(PreTrainedTokenizer):
56
+ """
57
+ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
58
+ no padding token in the original model.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ Path to the vocabulary file.
63
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
64
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
65
+ token instead.
66
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
67
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
68
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
69
+ The end of sequence token.
70
+ pad_token (`str` or `tokenizers.AddedToken`, *optional*):
71
+ A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
72
+ attention mechanisms or loss computation.
73
+ sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
74
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
75
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
76
+ to set:
77
+
78
+ - `enable_sampling`: Enable subword regularization.
79
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
80
+
81
+ - `nbest_size = {0,1}`: No sampling is performed.
82
+ - `nbest_size > 1`: samples from the nbest_size results.
83
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
84
+ using forward-filtering-and-backward-sampling algorithm.
85
+
86
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
87
+ BPE-dropout.
88
+
89
+ add_bos_token (`bool`, *optional*, defaults to `True`):
90
+ Whether or not to add a `bos_token` at the start of sequences.
91
+ add_eos_token (`bool`, *optional*, defaults to `False`):
92
+ Whether or not to add an `eos_token` at the end of sequences.
93
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
94
+ Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
95
+ extra spaces.
96
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
97
+ Whether or not the default system prompt for Llama should be used.
98
+ spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
99
+ Whether or not to add spaces between special tokens.
100
+ legacy (`bool`, *optional*):
101
+ Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
102
+ and #25224 which includes fixes to properly handle tokens that appear after special tokens. A simple
103
+ example:
104
+
105
+ - `legacy=True`:
106
+ ```python
107
+ >>> from transformers import T5Tokenizer
108
+
109
+ >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True)
110
+ >>> tokenizer.encode("Hello <extra_id_0>.")
111
+ [8774, 32099, 3, 5, 1]
112
+ ```
113
+ - `legacy=False`:
114
+ ```python
115
+ >>> from transformers import T5Tokenizer
116
+
117
+ >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False)
118
+ >>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here
119
+ [8774, 32099, 5, 1]
120
+ ```
121
+ Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
122
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
123
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
124
+ other word.
125
+
126
+ """
127
+
128
+ vocab_files_names = VOCAB_FILES_NAMES
129
+ model_input_names = ["input_ids", "attention_mask"]
130
+
131
+ def __init__(
132
+ self,
133
+ vocab_file,
134
+ unk_token="<unk>",
135
+ bos_token="<s>",
136
+ eos_token="</s>",
137
+ pad_token=None,
138
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
139
+ add_bos_token=True,
140
+ add_eos_token=False,
141
+ clean_up_tokenization_spaces=False,
142
+ use_default_system_prompt=False,
143
+ spaces_between_special_tokens=False,
144
+ legacy=None,
145
+ add_prefix_space=True,
146
+ **kwargs,
147
+ ):
148
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
149
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
150
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
151
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
152
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
153
+
154
+ if legacy is None:
155
+ logger.warning_once(
156
+ f"You are using the default legacy behaviour of the {self.__class__}. This is"
157
+ " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
158
+ " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
159
+ " means, and thoroughly read the reason why this was added as explained in"
160
+ " https://github.com/huggingface/transformers/pull/24565"
161
+ )
162
+ legacy = True
163
+
164
+ self.legacy = legacy
165
+ self.vocab_file = vocab_file
166
+ self.add_bos_token = add_bos_token
167
+ self.add_eos_token = add_eos_token
168
+ self.use_default_system_prompt = use_default_system_prompt
169
+ self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
170
+ self.add_prefix_space = add_prefix_space
171
+
172
+ super().__init__(
173
+ bos_token=bos_token,
174
+ eos_token=eos_token,
175
+ unk_token=unk_token,
176
+ pad_token=pad_token,
177
+ add_bos_token=add_bos_token,
178
+ add_eos_token=add_eos_token,
179
+ sp_model_kwargs=self.sp_model_kwargs,
180
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
181
+ use_default_system_prompt=use_default_system_prompt,
182
+ spaces_between_special_tokens=spaces_between_special_tokens,
183
+ legacy=legacy,
184
+ add_prefix_space=add_prefix_space,
185
+ **kwargs,
186
+ )
187
+
188
+ @property
189
+ def unk_token_length(self):
190
+ return len(self.sp_model.encode(str(self.unk_token)))
191
+
192
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
193
+ def get_spm_processor(self, from_slow=False):
194
+ tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
195
+ if self.legacy or from_slow: # no dependency on protobuf
196
+ tokenizer.Load(self.vocab_file)
197
+ return tokenizer
198
+
199
+ with open(self.vocab_file, "rb") as f:
200
+ sp_model = f.read()
201
+ model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
202
+ model = model_pb2.ModelProto.FromString(sp_model)
203
+ normalizer_spec = model_pb2.NormalizerSpec()
204
+ normalizer_spec.add_dummy_prefix = False
205
+ model.normalizer_spec.MergeFrom(normalizer_spec)
206
+ sp_model = model.SerializeToString()
207
+ tokenizer.LoadFromSerializedProto(sp_model)
208
+ return tokenizer
209
+
210
+ def __getstate__(self):
211
+ state = self.__dict__.copy()
212
+ state["sp_model"] = None
213
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
214
+ return state
215
+
216
+ def __setstate__(self, d):
217
+ self.__dict__ = d
218
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
219
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
220
+
221
+ @property
222
+ def vocab_size(self):
223
+ """Returns vocab size"""
224
+ return self.sp_model.get_piece_size()
225
+
226
+ def get_vocab(self):
227
+ """Returns vocab as a dict"""
228
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
229
+ vocab.update(self.added_tokens_encoder)
230
+ return vocab
231
+
232
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
233
+ def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
234
+ """
235
+ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
236
+ first token is special.
237
+ """
238
+ if self.legacy or len(text) == 0:
239
+ return super().tokenize(text, **kwargs)
240
+
241
+ text = text.replace(SPIECE_UNDERLINE, " ")
242
+ if self.add_prefix_space:
243
+ text = SPIECE_UNDERLINE + text
244
+
245
+ tokens = super().tokenize(text, **kwargs)
246
+
247
+ if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
248
+ tokens = tokens[1:]
249
+ return tokens
250
+
251
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
252
+ def _tokenize(self, text, **kwargs):
253
+ """
254
+ Returns a tokenized string.
255
+
256
+ We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
257
+ SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
258
+ `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
259
+ `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
260
+ `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
261
+ """
262
+ tokens = self.sp_model.encode(text, out_type=str)
263
+ if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
264
+ return tokens
265
+
266
+ # 1. Encode string + prefix ex: "<unk> Hey"
267
+ tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
268
+ # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
269
+ return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
270
+
271
+ def _convert_token_to_id(self, token):
272
+ """Converts a token (str) in an id using the vocab."""
273
+ return self.sp_model.piece_to_id(token)
274
+
275
+ def _convert_id_to_token(self, index):
276
+ """Converts an index (integer) in a token (str) using the vocab."""
277
+ token = self.sp_model.IdToPiece(index)
278
+ return token
279
+
280
+ def convert_tokens_to_string(self, tokens):
281
+ """Converts a sequence of tokens (string) in a single string."""
282
+ # since we manually add the prefix space, we have to remove it when decoding
283
+ if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
284
+ tokens[0] = tokens[0][1:]
285
+
286
+ current_sub_tokens = []
287
+ out_string = ""
288
+ prev_is_special = False
289
+ for i, token in enumerate(tokens):
290
+ # make sure that special tokens are not decoded using sentencepiece model
291
+ if token in self.all_special_tokens:
292
+ if not prev_is_special and i != 0 and self.legacy:
293
+ out_string += " "
294
+ out_string += self.sp_model.decode(current_sub_tokens) + token
295
+ prev_is_special = True
296
+ current_sub_tokens = []
297
+ else:
298
+ if prev_is_special and i == 1 and self.add_prefix_space and not token.startswith(SPIECE_UNDERLINE):
299
+ out_string += " "
300
+ current_sub_tokens.append(token)
301
+ prev_is_special = False
302
+ out_string += self.sp_model.decode(current_sub_tokens)
303
+ return out_string
304
+
305
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
306
+ """
307
+ Save the vocabulary and special tokens file to a directory.
308
+
309
+ Args:
310
+ save_directory (`str`):
311
+ The directory in which to save the vocabulary.
312
+
313
+ Returns:
314
+ `Tuple(str)`: Paths to the files saved.
315
+ """
316
+ if not os.path.isdir(save_directory):
317
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
318
+ return
319
+ out_vocab_file = os.path.join(
320
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
321
+ )
322
+
323
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
324
+ copyfile(self.vocab_file, out_vocab_file)
325
+ elif not os.path.isfile(self.vocab_file):
326
+ with open(out_vocab_file, "wb") as fi:
327
+ content_spiece_model = self.sp_model.serialized_model_proto()
328
+ fi.write(content_spiece_model)
329
+
330
+ return (out_vocab_file,)
331
+
332
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
333
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
334
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
335
+
336
+ output = bos_token_id + token_ids_0 + eos_token_id
337
+
338
+ if token_ids_1 is not None:
339
+ output = output + bos_token_id + token_ids_1 + eos_token_id
340
+
341
+ return output
342
+
343
+ def get_special_tokens_mask(
344
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
345
+ ) -> List[int]:
346
+ """
347
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
348
+ special tokens using the tokenizer `prepare_for_model` method.
349
+
350
+ Args:
351
+ token_ids_0 (`List[int]`):
352
+ List of IDs.
353
+ token_ids_1 (`List[int]`, *optional*):
354
+ Optional second list of IDs for sequence pairs.
355
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
356
+ Whether or not the token list is already formatted with special tokens for the model.
357
+
358
+ Returns:
359
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
360
+ """
361
+ if already_has_special_tokens:
362
+ return super().get_special_tokens_mask(
363
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
364
+ )
365
+
366
+ bos_token_id = [1] if self.add_bos_token else []
367
+ eos_token_id = [1] if self.add_eos_token else []
368
+
369
+ if token_ids_1 is None:
370
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
371
+ return (
372
+ bos_token_id
373
+ + ([0] * len(token_ids_0))
374
+ + eos_token_id
375
+ + bos_token_id
376
+ + ([0] * len(token_ids_1))
377
+ + eos_token_id
378
+ )
379
+
380
+ def create_token_type_ids_from_sequences(
381
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
382
+ ) -> List[int]:
383
+ """
384
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
385
+ sequence pair mask has the following format:
386
+
387
+ ```
388
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
389
+ | first sequence | second sequence |
390
+ ```
391
+
392
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
393
+
394
+ Args:
395
+ token_ids_0 (`List[int]`):
396
+ List of ids.
397
+ token_ids_1 (`List[int]`, *optional*):
398
+ Optional second list of IDs for sequence pairs.
399
+
400
+ Returns:
401
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
402
+ """
403
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
404
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
405
+
406
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
407
+
408
+ if token_ids_1 is not None:
409
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
410
+
411
+ return output
412
+
413
+ @property
414
+ def default_chat_template(self):
415
+ """
416
+ LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
417
+ Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
418
+ user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
419
+ rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
420
+ results in an unusual token ordering when it is present. This template should definitely be changed if you wish
421
+ to fine-tune a model with more flexible role ordering!
422
+
423
+ The output should look something like:
424
+
425
+ <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
426
+ <bos>[INST] Prompt [/INST]
427
+
428
+ The reference for this chat template is [this code
429
+ snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
430
+ in the original repository.
431
+ """
432
+ logger.warning_once(
433
+ "\nNo chat template is defined for this tokenizer - using the default template "
434
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
435
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
436
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
437
+ )
438
+ template = (
439
+ "{% if messages[0]['role'] == 'system' %}"
440
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
441
+ "{% set system_message = messages[0]['content'] %}"
442
+ "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
443
+ "{% set loop_messages = messages %}" # Or use the default system message if the flag is set
444
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
445
+ "{% else %}"
446
+ "{% set loop_messages = messages %}"
447
+ "{% set system_message = false %}"
448
+ "{% endif %}"
449
+ "{% for message in loop_messages %}" # Loop over all non-system messages
450
+ "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
451
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
452
+ "{% endif %}"
453
+ "{% if loop.index0 == 0 and system_message != false %}" # Embed system message in first message
454
+ "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
455
+ "{% else %}"
456
+ "{% set content = message['content'] %}"
457
+ "{% endif %}"
458
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
459
+ "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
460
+ "{% elif message['role'] == 'system' %}"
461
+ "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
462
+ "{% elif message['role'] == 'assistant' %}"
463
+ "{{ ' ' + content.strip() + ' ' + eos_token }}"
464
+ "{% endif %}"
465
+ "{% endfor %}"
466
+ )
467
+ template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
468
+ default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
469
+ template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
470
+
471
+ return template
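To make the mask layout above concrete, here is a minimal standalone sketch of the same logic with placeholder special-token ids (not the real Llama vocabulary ids):

```python
from typing import List, Optional


def token_type_ids(
    token_ids_0: List[int],
    token_ids_1: Optional[List[int]] = None,
    add_bos_token: bool = True,
    add_eos_token: bool = False,
) -> List[int]:
    # Mirrors the method above: 0s cover bos + first sequence (+ eos),
    # 1s cover bos + second sequence (+ eos).
    bos_token_id = [1] if add_bos_token else []  # placeholder bos id
    eos_token_id = [2] if add_eos_token else []  # placeholder eos id
    output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
    if token_ids_1 is not None:
        output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
    return output


print(token_type_ids([10, 11, 12], [20, 21]))  # [0, 0, 0, 0, 1, 1, 1]
```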
llmeval-env/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py ADDED
@@ -0,0 +1,281 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import os
16
+ from shutil import copyfile
17
+ from typing import Optional, Tuple
18
+
19
+ from tokenizers import processors
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from ...utils import is_sentencepiece_available, logging
23
+ from ...utils.versions import require_version
24
+
25
+
26
+ require_version("tokenizers>=0.13.3")
27
+
28
+ if is_sentencepiece_available():
29
+ from .tokenization_llama import LlamaTokenizer
30
+ else:
31
+ LlamaTokenizer = None
32
+
33
+ logger = logging.get_logger(__name__)
34
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}
35
+
36
+ B_INST, E_INST = "[INST]", "[/INST]"
37
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
38
+
39
+ # fmt: off
40
+ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
41
+ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
42
+ that your responses are socially unbiased and positive in nature.
43
+
44
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
45
+ correct. If you don't know the answer to a question, please don't share false information."""
46
+ # fmt: on
47
+
48
+
49
+ class LlamaTokenizerFast(PreTrainedTokenizerFast):
50
+ """
51
+ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.
52
+
53
+ Notably, this uses ByteFallback and no normalization.
54
+
55
+ ```python
56
+ >>> from transformers import LlamaTokenizerFast
57
+
58
+ >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
59
+ >>> tokenizer.encode("Hello this is a test")
60
+ [1, 15043, 445, 338, 263, 1243]
61
+ ```
62
+
63
+ If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
64
+ call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
65
+ values of the first token and final token of an encoded sequence will not be correct). For more details, check out
66
+ the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
67
+
68
+
69
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
70
+ refer to this superclass for more information regarding those methods.
71
+
72
+ Args:
73
+ vocab_file (`str`, *optional*):
74
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
75
+ contains the vocabulary necessary to instantiate a tokenizer.
76
+ tokenizer_file (`str`, *optional*):
77
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
78
+ contains everything needed to load the tokenizer.
79
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
80
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
81
+ extra spaces.
82
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
83
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
84
+ token instead.
85
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
86
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
87
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
88
+ The end of sequence token.
89
+ add_bos_token (`bool`, *optional*, defaults to `True`):
90
+ Whether or not to add a `bos_token` at the start of sequences.
91
+ add_eos_token (`bool`, *optional*, defaults to `False`):
92
+ Whether or not to add an `eos_token` at the end of sequences.
93
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
94
+ Whether or not the default system prompt for Llama should be used.
95
+ add_prefix_space (`bool`, *optional*):
96
+ Whether or not the tokenizer should automatically add a prefix space.
97
+ """
98
+
99
+ vocab_files_names = VOCAB_FILES_NAMES
100
+ slow_tokenizer_class = LlamaTokenizer
101
+ padding_side = "left"
102
+ model_input_names = ["input_ids", "attention_mask"]
103
+
104
+ def __init__(
105
+ self,
106
+ vocab_file=None,
107
+ tokenizer_file=None,
108
+ clean_up_tokenization_spaces=False,
109
+ unk_token="<unk>",
110
+ bos_token="<s>",
111
+ eos_token="</s>",
112
+ add_bos_token=True,
113
+ add_eos_token=False,
114
+ use_default_system_prompt=False,
115
+ add_prefix_space=None,
116
+ **kwargs,
117
+ ):
118
+ if add_prefix_space is not None:
119
+ logger.warning_once(
120
+ "You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers"
121
+ )
122
+ kwargs["from_slow"] = True
123
+
124
+ super().__init__(
125
+ vocab_file=vocab_file,
126
+ tokenizer_file=tokenizer_file,
127
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
128
+ unk_token=unk_token,
129
+ bos_token=bos_token,
130
+ eos_token=eos_token,
131
+ add_bos_token=add_bos_token,
132
+ add_eos_token=add_eos_token,
133
+ use_default_system_prompt=use_default_system_prompt,
134
+ **kwargs,
135
+ )
136
+ self._add_bos_token = add_bos_token
137
+ self._add_eos_token = add_eos_token
138
+ self.update_post_processor()
139
+ self.use_default_system_prompt = use_default_system_prompt
140
+ self.vocab_file = vocab_file
141
+
142
+ @property
143
+ def can_save_slow_tokenizer(self) -> bool:
144
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
145
+
146
+ def update_post_processor(self):
147
+ """
148
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
149
+ """
150
+ bos = self.bos_token
151
+ bos_token_id = self.bos_token_id
152
+ if bos is None and self.add_bos_token:
153
+ raise ValueError("add_bos_token = True but bos_token = None")
154
+
155
+ eos = self.eos_token
156
+ eos_token_id = self.eos_token_id
157
+ if eos is None and self.add_eos_token:
158
+ raise ValueError("add_eos_token = True but eos_token = None")
159
+
160
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
161
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
162
+
163
+ special_tokens = []
164
+ if self.add_bos_token:
165
+ special_tokens.append((bos, bos_token_id))
166
+ if self.add_eos_token:
167
+ special_tokens.append((eos, eos_token_id))
168
+ self._tokenizer.post_processor = processors.TemplateProcessing(
169
+ single=single, pair=pair, special_tokens=special_tokens
170
+ )
171
+
172
+ @property
173
+ def add_eos_token(self):
174
+ return self._add_eos_token
175
+
176
+ @property
177
+ def add_bos_token(self):
178
+ return self._add_bos_token
179
+
180
+ @add_eos_token.setter
181
+ def add_eos_token(self, value):
182
+ self._add_eos_token = value
183
+ self.update_post_processor()
184
+
185
+ @add_bos_token.setter
186
+ def add_bos_token(self, value):
187
+ self._add_bos_token = value
188
+ self.update_post_processor()
189
+
190
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
191
+ if not self.can_save_slow_tokenizer:
192
+ raise ValueError(
193
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
194
+ "tokenizer."
195
+ )
196
+
197
+ if not os.path.isdir(save_directory):
198
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
199
+ return
200
+ out_vocab_file = os.path.join(
201
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
202
+ )
203
+
204
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
205
+ copyfile(self.vocab_file, out_vocab_file)
206
+
207
+ return (out_vocab_file,)
208
+
209
+ @property
210
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.default_chat_template
211
+ def default_chat_template(self):
212
+ """
213
+ LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
214
+ Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
215
+ user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
216
+ rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
217
+ results in an unusual token ordering when it is present. This template should definitely be changed if you wish
218
+ to fine-tune a model with more flexible role ordering!
219
+
220
+ The output should look something like:
221
+
222
+ <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
223
+ <bos>[INST] Prompt [/INST]
224
+
225
+ The reference for this chat template is [this code
226
+ snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
227
+ in the original repository.
228
+ """
229
+ logger.warning_once(
230
+ "\nNo chat template is defined for this tokenizer - using the default template "
231
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
232
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
233
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
234
+ )
235
+ template = (
236
+ "{% if messages[0]['role'] == 'system' %}"
237
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
238
+ "{% set system_message = messages[0]['content'] %}"
239
+ "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
240
+ "{% set loop_messages = messages %}" # Or use the default system message if the flag is set
241
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
242
+ "{% else %}"
243
+ "{% set loop_messages = messages %}"
244
+ "{% set system_message = false %}"
245
+ "{% endif %}"
246
+ "{% for message in loop_messages %}" # Loop over all non-system messages
247
+ "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
248
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
249
+ "{% endif %}"
250
+ "{% if loop.index0 == 0 and system_message != false %}" # Embed system message in first message
251
+ "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
252
+ "{% else %}"
253
+ "{% set content = message['content'] %}"
254
+ "{% endif %}"
255
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
256
+ "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
257
+ "{% elif message['role'] == 'system' %}"
258
+ "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
259
+ "{% elif message['role'] == 'assistant' %}"
260
+ "{{ ' ' + content.strip() + ' ' + eos_token }}"
261
+ "{% endif %}"
262
+ "{% endfor %}"
263
+ )
264
+ template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
265
+ default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
266
+ template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
267
+
268
+ return template
269
+
270
+ # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers
271
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
272
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
273
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
274
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
275
+
276
+ output = bos_token_id + token_ids_0 + eos_token_id
277
+
278
+ if token_ids_1 is not None:
279
+ output = output + bos_token_id + token_ids_1 + eos_token_id
280
+
281
+ return output
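A hedged usage sketch for the class above (it assumes network access to the `hf-internal-testing/llama-tokenizer` checkpoint already used in the class docstring; the exact ids depend on that checkpoint):

```python
from transformers import LlamaTokenizerFast

tok = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")

print(tok.encode("Hello this is a test"))
# [1, 15043, 445, 338, 263, 1243] -- bos id 1 is prepended because add_bos_token=True

# The add_eos_token setter re-runs update_post_processor(), so later encodes
# also append the eos token (id 2 for this checkpoint).
tok.add_eos_token = True
print(tok.encode("Hello this is a test"))
# [1, 15043, 445, 338, 263, 1243, 2]
```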
llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc ADDED
Binary file (6.55 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc ADDED
Binary file (65.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/configuration_mt5.py ADDED
@@ -0,0 +1,173 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, The T5 Authors and HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ mT5 model configuration"""
16
+ from typing import Mapping
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...onnx import OnnxSeq2SeqConfigWithPast
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class MT5Config(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to
29
+ instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a
30
+ configuration with the defaults will yield a similar configuration to that of the mT5
31
+ [google/mt5-small](https://huggingface.co/google/mt5-small) architecture.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+ Arguments:
37
+ vocab_size (`int`, *optional*, defaults to 250112):
38
+ Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
39
+ `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`].
40
+ d_model (`int`, *optional*, defaults to 512):
41
+ Size of the encoder layers and the pooler layer.
42
+ d_kv (`int`, *optional*, defaults to 64):
43
+ Size of the key, query, value projections per attention head. It is usually expected that `d_kv` equals `d_model // num_heads`,
44
+ but in the mt5-small architecture it does not. The `inner_dim` of the projection layer is defined as `num_heads * d_kv`.
45
+ d_ff (`int`, *optional*, defaults to 1024):
46
+ Size of the intermediate feed forward layer in each `T5Block`.
47
+ num_layers (`int`, *optional*, defaults to 8):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_decoder_layers (`int`, *optional*):
50
+ Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
51
+ num_heads (`int`, *optional*, defaults to 6):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
54
+ The number of buckets to use for each attention layer.
55
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
56
+ The maximum relative distance used for bucketing; relative positions beyond this distance share the same bucket.
57
+ dropout_rate (`float`, *optional*, defaults to 0.1):
58
+ The ratio for all dropout layers.
59
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio for the classifier.
61
+ layer_norm_eps (`float`, *optional*, defaults to 1e-6):
62
+ The epsilon used by the layer normalization layers.
63
+ initializer_factor (`float`, *optional*, defaults to 1):
64
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
65
+ testing).
66
+ feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
67
+ Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
68
+ use_cache (`bool`, *optional*, defaults to `True`):
69
+ Whether or not the model should return the last key/values attentions (not used by all models).
70
+ """
71
+
72
+ model_type = "mt5"
73
+ keys_to_ignore_at_inference = ["past_key_values"]
74
+ attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
75
+
76
+ def __init__(
77
+ self,
78
+ vocab_size=250112,
79
+ d_model=512,
80
+ d_kv=64,
81
+ d_ff=1024,
82
+ num_layers=8,
83
+ num_decoder_layers=None,
84
+ num_heads=6,
85
+ relative_attention_num_buckets=32,
86
+ relative_attention_max_distance=128,
87
+ dropout_rate=0.1,
88
+ layer_norm_epsilon=1e-6,
89
+ initializer_factor=1.0,
90
+ feed_forward_proj="gated-gelu",
91
+ is_encoder_decoder=True,
92
+ use_cache=True,
93
+ tokenizer_class="T5Tokenizer",
94
+ tie_word_embeddings=False,
95
+ pad_token_id=0,
96
+ eos_token_id=1,
97
+ decoder_start_token_id=0,
98
+ classifier_dropout=0.0,
99
+ **kwargs,
100
+ ):
101
+ self.vocab_size = vocab_size
102
+ self.d_model = d_model
103
+ self.d_kv = d_kv
104
+ self.d_ff = d_ff
105
+ self.num_layers = num_layers
106
+ self.num_decoder_layers = (
107
+ num_decoder_layers if num_decoder_layers is not None else self.num_layers
108
+ ) # default = symmetry
109
+ self.num_heads = num_heads
110
+ self.relative_attention_num_buckets = relative_attention_num_buckets
111
+ self.relative_attention_max_distance = relative_attention_max_distance
112
+ self.dropout_rate = dropout_rate
113
+ self.classifier_dropout = classifier_dropout
114
+ self.layer_norm_epsilon = layer_norm_epsilon
115
+ self.initializer_factor = initializer_factor
116
+ self.feed_forward_proj = feed_forward_proj
117
+ self.use_cache = use_cache
118
+
119
+ act_info = self.feed_forward_proj.split("-")
120
+ self.dense_act_fn = act_info[-1]
121
+ self.is_gated_act = act_info[0] == "gated"
122
+
123
+ if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
124
+ raise ValueError(
125
+ f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
126
+ "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
127
+ "'gated-gelu' or 'relu'"
128
+ )
129
+
130
+ # for backwards compatibility
131
+ if feed_forward_proj == "gated-gelu":
132
+ self.dense_act_fn = "gelu_new"
133
+
134
+ super().__init__(
135
+ is_encoder_decoder=is_encoder_decoder,
136
+ tokenizer_class=tokenizer_class,
137
+ tie_word_embeddings=tie_word_embeddings,
138
+ pad_token_id=pad_token_id,
139
+ eos_token_id=eos_token_id,
140
+ decoder_start_token_id=decoder_start_token_id,
141
+ **kwargs,
142
+ )
143
+
144
+
145
+ class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
146
+ @property
147
+ # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
148
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
149
+ common_inputs = {
150
+ "input_ids": {0: "batch", 1: "encoder_sequence"},
151
+ "attention_mask": {0: "batch", 1: "encoder_sequence"},
152
+ }
153
+ if self.use_past:
154
+ common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
155
+ common_inputs["decoder_input_ids"] = {0: "batch"}
156
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
157
+ else:
158
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
159
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
160
+
161
+ if self.use_past:
162
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
163
+
164
+ return common_inputs
165
+
166
+ @property
167
+ # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
168
+ def default_onnx_opset(self) -> int:
169
+ return 13
170
+
171
+ @property
172
+ def atol_for_validation(self) -> float:
173
+ return 5e-4
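A short sketch of how the `feed_forward_proj` parsing in `MT5Config.__init__` above behaves (pure configuration code, no weights needed):

```python
from transformers import MT5Config

cfg = MT5Config()  # default feed_forward_proj="gated-gelu"
print(cfg.is_gated_act, cfg.dense_act_fn)  # True gelu_new (backwards-compatibility branch)

cfg = MT5Config(feed_forward_proj="relu")
print(cfg.is_gated_act, cfg.dense_act_fn)  # False relu

# Anything that is neither "{ACT_FN}" nor "gated-{ACT_FN}" is rejected:
try:
    MT5Config(feed_forward_proj="silu-gelu")
except ValueError as err:
    print(err)
```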
llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/modeling_flax_mt5.py ADDED
@@ -0,0 +1,120 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax mT5 model."""
16
+
17
+ import jax.numpy as jnp
18
+
19
+ from ...utils import logging
20
+ from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
21
+ from .configuration_mt5 import MT5Config
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ _CONFIG_FOR_DOC = "T5Config"
27
+
28
+
29
+ # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
30
+ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
31
+ """
32
+ Shift input ids one token to the right.
33
+ """
34
+ shifted_input_ids = jnp.zeros_like(input_ids)
35
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
36
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
37
+
38
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
39
+ return shifted_input_ids
40
+
41
+
42
+ class FlaxMT5Model(FlaxT5Model):
43
+ r"""
44
+ This class overrides [`FlaxT5Model`]. Please check the superclass for the appropriate documentation alongside usage
45
+ examples.
46
+
47
+ Examples:
48
+
49
+ ```python
50
+ >>> from transformers import FlaxMT5Model, AutoTokenizer
51
+
52
+ >>> model = FlaxMT5Model.from_pretrained("google/mt5-small")
53
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
54
+
55
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
56
+ >>> summary = "Weiter Verhandlung in Syrien."
57
+ >>> inputs = tokenizer(article, return_tensors="np")
58
+
59
+ >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids
60
+
61
+ >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=decoder_input_ids)
62
+ >>> hidden_states = outputs.last_hidden_state
63
+ ```"""
64
+
65
+ model_type = "mt5"
66
+ config_class = MT5Config
67
+
68
+
69
+ class FlaxMT5EncoderModel(FlaxT5EncoderModel):
70
+ r"""
71
+ This class overrides [`FlaxT5EncoderModel`]. Please check the superclass for the appropriate documentation
72
+ alongside usage examples.
73
+
74
+ Examples:
75
+
76
+ ```python
77
+ >>> from transformers import FlaxT5EncoderModel, AutoTokenizer
78
+
79
+ >>> model = FlaxT5EncoderModel.from_pretrained("google/mt5-small")
80
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
81
+
82
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
83
+ >>> summary = "Weiter Verhandlung in Syrien."
84
+ >>> inputs = tokenizer(article, return_tensors="np")
85
+
86
+ >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids
87
+
88
+ >>> outputs = model(input_ids=inputs["input_ids"])
89
+ >>> hidden_states = outputs.last_hidden_state
90
+ ```"""
91
+
92
+ model_type = "mt5"
93
+ config_class = MT5Config
94
+
95
+
96
+ class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
97
+ r"""
98
+ This class overrides [`FlaxT5ForConditionalGeneration`]. Please check the superclass for the appropriate
99
+ documentation alongside usage examples.
100
+
101
+ Examples:
102
+
103
+ ```python
104
+ >>> from transformers import FlaxMT5ForConditionalGeneration, AutoTokenizer
105
+
106
+ >>> model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
107
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
108
+
109
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
110
+ >>> summary = "Weiter Verhandlung in Syrien."
111
+ >>> inputs = tokenizer(article, return_tensors="np")
112
+
113
+ >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids
114
+
115
+ >>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids)
116
+ >>> logits = outputs.logits
117
+ ```"""
118
+
119
+ model_type = "mt5"
120
+ config_class = MT5Config
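To illustrate the module-level `shift_tokens_right` above, a small sketch (requires JAX/Flax; the label values are arbitrary examples and -100 follows the usual ignore-index convention):

```python
import jax.numpy as jnp

from transformers.models.mt5.modeling_flax_mt5 import shift_tokens_right

labels = jnp.array([[259, 1042, 1, -100, -100]])
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
print(decoder_input_ids)
# [[   0  259 1042    1    0]] -- decoder start token prepended, labels shifted right,
# and remaining -100 entries replaced by the pad id
```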
llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/modeling_mt5.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mt5/modeling_tf_mt5.py ADDED
@@ -0,0 +1,95 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tensorflow mT5 model."""
16
+
17
+ from ...utils import logging
18
+ from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
19
+ from .configuration_mt5 import MT5Config
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+ _CONFIG_FOR_DOC = "T5Config"
25
+
26
+
27
+ class TFMT5Model(TFT5Model):
28
+ r"""
29
+ This class overrides [`TFT5Model`]. Please check the superclass for the appropriate documentation alongside usage
30
+ examples.
31
+
32
+ Examples:
33
+
34
+ ```python
35
+ >>> from transformers import TFMT5Model, AutoTokenizer
36
+
37
+ >>> model = TFMT5Model.from_pretrained("google/mt5-small")
38
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
39
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
40
+ >>> summary = "Weiter Verhandlung in Syrien."
41
+ >>> inputs = tokenizer(article, return_tensors="tf")
42
+ >>> labels = tokenizer(text_target=summary, return_tensors="tf")
43
+
44
+ >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"])
45
+ >>> hidden_states = outputs.last_hidden_state
46
+ ```"""
47
+
48
+ model_type = "mt5"
49
+ config_class = MT5Config
50
+
51
+
52
+ class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
53
+ r"""
54
+ This class overrides [`TFT5ForConditionalGeneration`]. Please check the superclass for the appropriate
55
+ documentation alongside usage examples.
56
+
57
+ Examples:
58
+
59
+ ```python
60
+ >>> from transformers import TFMT5ForConditionalGeneration, AutoTokenizer
61
+
62
+ >>> model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
63
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
64
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
65
+ >>> summary = "Weiter Verhandlung in Syrien."
66
+ >>> inputs = tokenizer(article, text_target=summary, return_tensors="tf")
67
+
68
+ >>> outputs = model(**inputs)
69
+ >>> loss = outputs.loss
70
+ ```"""
71
+
72
+ model_type = "mt5"
73
+ config_class = MT5Config
74
+
75
+
76
+ class TFMT5EncoderModel(TFT5EncoderModel):
77
+ r"""
78
+ This class overrides [`TFT5EncoderModel`]. Please check the superclass for the appropriate documentation alongside
79
+ usage examples.
80
+
81
+ Examples:
82
+
83
+ ```python
84
+ >>> from transformers import TFMT5EncoderModel, AutoTokenizer
85
+
86
+ >>> model = TFMT5EncoderModel.from_pretrained("google/mt5-small")
87
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
88
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
89
+ >>> input_ids = tokenizer(article, return_tensors="tf").input_ids
90
+ >>> outputs = model(input_ids)
91
+ >>> hidden_state = outputs.last_hidden_state
92
+ ```"""
93
+
94
+ model_type = "mt5"
95
+ config_class = MT5Config
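Since the three classes above only set `model_type` and `config_class`, here is a hedged sketch of how they are usually reached through the Auto classes (requires TensorFlow and network access; the checkpoint name is taken from the docstrings above):

```python
from transformers import AutoConfig, TFAutoModelForSeq2SeqLM

config = AutoConfig.from_pretrained("google/mt5-small")
print(type(config).__name__)  # MT5Config -- resolved from model_type "mt5"

model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
print(type(model).__name__)  # TFMT5ForConditionalGeneration
```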
llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.57 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/configuration_siglip.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/convert_siglip_to_hf.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/modeling_siglip.cpython-310.pyc ADDED
Binary file (43.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/processing_siglip.cpython-310.pyc ADDED
Binary file (6.76 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/siglip/__pycache__/tokenization_siglip.cpython-310.pyc ADDED
Binary file (13.3 kB). View file