applied-ai-018 committed
Commit 1bf242c (verified)
1 Parent(s): e2a1d53

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__init__.py +85 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/configuration_conditional_detr.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/feature_extraction_conditional_detr.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/configuration_conditional_detr.py +273 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py +325 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/feature_extraction_conditional_detr.py +43 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/image_processing_conditional_detr.py +1777 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/modeling_conditional_detr.py +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__init__.py +77 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/__init__.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_hifigan.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_model_with_hifigan.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/tokenization_fastspeech2_conformer.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +482 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py +210 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_hifigan.py +134 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py +102 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +1684 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +184 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__init__.py +73 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/convert_fuyu_model_weights_to_hf.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py +211 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py +134 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/image_processing_fuyu.py +736 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/modeling_fuyu.py +358 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/processing_fuyu.py +694 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__init__.py +63 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py +282 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py +532 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py +160 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py +625 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/swin/__init__.py +86 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/swin/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/swin/__pycache__/configuration_swin.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__init__.py ADDED
@@ -0,0 +1,85 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+ _import_structure = {
+     "configuration_conditional_detr": [
+         "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "ConditionalDetrConfig",
+         "ConditionalDetrOnnxConfig",
+     ]
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
+     _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_conditional_detr"] = [
+         "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ConditionalDetrForObjectDetection",
+         "ConditionalDetrForSegmentation",
+         "ConditionalDetrModel",
+         "ConditionalDetrPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_conditional_detr import (
+         CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         ConditionalDetrConfig,
+         ConditionalDetrOnnxConfig,
+     )
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
+         from .image_processing_conditional_detr import ConditionalDetrImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_conditional_detr import (
+             CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ConditionalDetrForObjectDetection,
+             ConditionalDetrForSegmentation,
+             ConditionalDetrModel,
+             ConditionalDetrPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
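
For context on how this lazy init surfaces the package's public API: accessing any name listed in `_import_structure` triggers the corresponding submodule import on first use. A minimal usage sketch (editorial example, not part of the commit; assumes a working `transformers` install):

```python
# Minimal sketch of consuming the lazily exported names above.
# The submodule is only imported when the attribute is first accessed.
from transformers.models.conditional_detr import ConditionalDetrConfig

config = ConditionalDetrConfig()
print(config.model_type)  # "conditional_detr"
```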
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.43 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/configuration_conditional_detr.cpython-310.pyc ADDED
Binary file (11.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (9.33 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/feature_extraction_conditional_detr.cpython-310.pyc ADDED
Binary file (1.43 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr.cpython-310.pyc ADDED
Binary file (59.3 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/configuration_conditional_detr.py ADDED
@@ -0,0 +1,273 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Conditional DETR model configuration"""
+ from collections import OrderedDict
+ from typing import Mapping
+
+ from packaging import version
+
+ from ...configuration_utils import PretrainedConfig
+ from ...onnx import OnnxConfig
+ from ...utils import logging
+ from ..auto import CONFIG_MAPPING
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class ConditionalDetrConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate
+     a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the Conditional DETR
+     [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         use_timm_backbone (`bool`, *optional*, defaults to `True`):
+             Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
+             API.
+         backbone_config (`PretrainedConfig` or `dict`, *optional*):
+             The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
+             case it will default to `ResNetConfig()`.
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         num_queries (`int`, *optional*, defaults to 100):
+             Number of object queries, i.e. detection slots. This is the maximal number of objects
+             [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 100 queries.
+         d_model (`int`, *optional*, defaults to 256):
+             Dimension of the layers.
+         encoder_layers (`int`, *optional*, defaults to 6):
+             Number of encoder layers.
+         decoder_layers (`int`, *optional*, defaults to 6):
+             Number of decoder layers.
+         encoder_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         decoder_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         decoder_ffn_dim (`int`, *optional*, defaults to 2048):
+             Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+         encoder_ffn_dim (`int`, *optional*, defaults to 2048):
+             Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+         activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         dropout (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         activation_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for activations inside the fully connected layer.
+         init_std (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         init_xavier_std (`float`, *optional*, defaults to 1):
+             The scaling factor used for the Xavier initialization gain in the HM Attention map module.
+         encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
+             for more details.
+         decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
+             for more details.
+         auxiliary_loss (`bool`, *optional*, defaults to `False`):
+             Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
+         position_embedding_type (`str`, *optional*, defaults to `"sine"`):
+             Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
+         backbone (`str`, *optional*, defaults to `"resnet50"`):
+             Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+             will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
+             is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
+         use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
+             Whether to use pretrained weights for the backbone.
+         backbone_kwargs (`dict`, *optional*):
+             Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
+             e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+         dilation (`bool`, *optional*, defaults to `False`):
+             Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
+             `use_timm_backbone` = `True`.
+         class_cost (`float`, *optional*, defaults to 1):
+             Relative weight of the classification error in the Hungarian matching cost.
+         bbox_cost (`float`, *optional*, defaults to 5):
+             Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
+         giou_cost (`float`, *optional*, defaults to 2):
+             Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
+         mask_loss_coefficient (`float`, *optional*, defaults to 1):
+             Relative weight of the Focal loss in the panoptic segmentation loss.
+         dice_loss_coefficient (`float`, *optional*, defaults to 1):
+             Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
+         bbox_loss_coefficient (`float`, *optional*, defaults to 5):
+             Relative weight of the L1 bounding box loss in the object detection loss.
+         giou_loss_coefficient (`float`, *optional*, defaults to 2):
+             Relative weight of the generalized IoU loss in the object detection loss.
+         eos_coefficient (`float`, *optional*, defaults to 0.1):
+             Relative classification weight of the 'no-object' class in the object detection loss.
+         focal_alpha (`float`, *optional*, defaults to 0.25):
+             Alpha parameter in the focal loss.
+
+     Examples:
+
+     ```python
+     >>> from transformers import ConditionalDetrConfig, ConditionalDetrModel
+
+     >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration
+     >>> configuration = ConditionalDetrConfig()
+
+     >>> # Initializing a model (with random weights) from the microsoft/conditional-detr-resnet-50 style configuration
+     >>> model = ConditionalDetrModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "conditional_detr"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {
+         "hidden_size": "d_model",
+         "num_attention_heads": "encoder_attention_heads",
+     }
+
+     def __init__(
+         self,
+         use_timm_backbone=True,
+         backbone_config=None,
+         num_channels=3,
+         num_queries=300,
+         encoder_layers=6,
+         encoder_ffn_dim=2048,
+         encoder_attention_heads=8,
+         decoder_layers=6,
+         decoder_ffn_dim=2048,
+         decoder_attention_heads=8,
+         encoder_layerdrop=0.0,
+         decoder_layerdrop=0.0,
+         is_encoder_decoder=True,
+         activation_function="relu",
+         d_model=256,
+         dropout=0.1,
+         attention_dropout=0.0,
+         activation_dropout=0.0,
+         init_std=0.02,
+         init_xavier_std=1.0,
+         auxiliary_loss=False,
+         position_embedding_type="sine",
+         backbone="resnet50",
+         use_pretrained_backbone=True,
+         backbone_kwargs=None,
+         dilation=False,
+         class_cost=2,
+         bbox_cost=5,
+         giou_cost=2,
+         mask_loss_coefficient=1,
+         dice_loss_coefficient=1,
+         cls_loss_coefficient=2,
+         bbox_loss_coefficient=5,
+         giou_loss_coefficient=2,
+         focal_alpha=0.25,
+         **kwargs,
+     ):
+         if not use_timm_backbone and use_pretrained_backbone:
+             raise ValueError(
+                 "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
+             )
+
+         if backbone_config is not None and backbone is not None:
+             raise ValueError("You can't specify both `backbone` and `backbone_config`.")
+
+         if backbone_config is not None and use_timm_backbone:
+             raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
+
+         if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
+             raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+         if not use_timm_backbone:
+             if backbone_config is None:
+                 logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
+                 backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
+             elif isinstance(backbone_config, dict):
+                 backbone_model_type = backbone_config.get("model_type")
+                 config_class = CONFIG_MAPPING[backbone_model_type]
+                 backbone_config = config_class.from_dict(backbone_config)
+
+         self.use_timm_backbone = use_timm_backbone
+         self.backbone_config = backbone_config
+         self.num_channels = num_channels
+         self.num_queries = num_queries
+         self.d_model = d_model
+         self.encoder_ffn_dim = encoder_ffn_dim
+         self.encoder_layers = encoder_layers
+         self.encoder_attention_heads = encoder_attention_heads
+         self.decoder_ffn_dim = decoder_ffn_dim
+         self.decoder_layers = decoder_layers
+         self.decoder_attention_heads = decoder_attention_heads
+         self.dropout = dropout
+         self.attention_dropout = attention_dropout
+         self.activation_dropout = activation_dropout
+         self.activation_function = activation_function
+         self.init_std = init_std
+         self.init_xavier_std = init_xavier_std
+         self.encoder_layerdrop = encoder_layerdrop
+         self.decoder_layerdrop = decoder_layerdrop
+         self.num_hidden_layers = encoder_layers
+         self.auxiliary_loss = auxiliary_loss
+         self.position_embedding_type = position_embedding_type
+         self.backbone = backbone
+         self.use_pretrained_backbone = use_pretrained_backbone
+         self.backbone_kwargs = backbone_kwargs
+         self.dilation = dilation
+         # Hungarian matcher
+         self.class_cost = class_cost
+         self.bbox_cost = bbox_cost
+         self.giou_cost = giou_cost
+         # Loss coefficients
+         self.mask_loss_coefficient = mask_loss_coefficient
+         self.dice_loss_coefficient = dice_loss_coefficient
+         self.cls_loss_coefficient = cls_loss_coefficient
+         self.bbox_loss_coefficient = bbox_loss_coefficient
+         self.giou_loss_coefficient = giou_loss_coefficient
+         self.focal_alpha = focal_alpha
+         super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+     @property
+     def num_attention_heads(self) -> int:
+         return self.encoder_attention_heads
+
+     @property
+     def hidden_size(self) -> int:
+         return self.d_model
+
+
+ class ConditionalDetrOnnxConfig(OnnxConfig):
+     torch_onnx_minimum_version = version.parse("1.11")
+
+     @property
+     def inputs(self) -> Mapping[str, Mapping[int, str]]:
+         return OrderedDict(
+             [
+                 ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+                 ("pixel_mask", {0: "batch"}),
+             ]
+         )
+
+     @property
+     def atol_for_validation(self) -> float:
+         return 1e-5
+
+     @property
+     def default_onnx_opset(self) -> int:
+         return 12
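
One detail worth noting in the config above: `attribute_map` plus the two properties mean the generic `hidden_size` / `num_attention_heads` names resolve to `d_model` / `encoder_attention_heads`. A small sketch (editorial example, assuming the same `transformers` install as above):

```python
from transformers import ConditionalDetrConfig

# The generic names are aliases for the DETR-specific ones.
config = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8
```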
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,325 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Conditional DETR checkpoints."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from collections import OrderedDict
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import (
29
+ ConditionalDetrConfig,
30
+ ConditionalDetrForObjectDetection,
31
+ ConditionalDetrForSegmentation,
32
+ ConditionalDetrImageProcessor,
33
+ )
34
+ from transformers.utils import logging
35
+
36
+
37
+ logging.set_verbosity_info()
38
+ logger = logging.get_logger(__name__)
39
+
40
+ # here we list all keys to be renamed (original name on the left, our name on the right)
41
+ rename_keys = []
42
+ for i in range(6):
43
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
44
+ rename_keys.append(
45
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
46
+ )
47
+ rename_keys.append(
48
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
49
+ )
50
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
51
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
52
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
53
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
54
+ rename_keys.append(
55
+ (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
56
+ )
57
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
58
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
59
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
60
+ # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
61
+ rename_keys.append(
62
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
63
+ )
64
+ rename_keys.append(
65
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
66
+ )
67
+ rename_keys.append(
68
+ (
69
+ f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
70
+ f"decoder.layers.{i}.encoder_attn.out_proj.weight",
71
+ )
72
+ )
73
+ rename_keys.append(
74
+ (
75
+ f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
76
+ f"decoder.layers.{i}.encoder_attn.out_proj.bias",
77
+ )
78
+ )
79
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
80
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
81
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
82
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
83
+ rename_keys.append(
84
+ (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
85
+ )
86
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
87
+ rename_keys.append(
88
+ (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
89
+ )
90
+ rename_keys.append(
91
+ (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
92
+ )
93
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
94
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
95
+
96
+ # q, k, v projections in self/cross-attention in decoder for conditional DETR
97
+ rename_keys.append(
98
+ (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
99
+ )
100
+ rename_keys.append(
101
+ (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
102
+ )
103
+ rename_keys.append(
104
+ (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
105
+ )
106
+ rename_keys.append(
107
+ (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
108
+ )
109
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
110
+ rename_keys.append(
111
+ (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
112
+ )
113
+ # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
114
+ rename_keys.append(
115
+ (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
116
+ )
117
+ rename_keys.append(
118
+ (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
119
+ )
120
+ rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
121
+ rename_keys.append(
122
+ (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
123
+ )
124
+
125
+ rename_keys.append(
126
+ (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
127
+ )
128
+ rename_keys.append(
129
+ (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
130
+ )
131
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
132
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
133
+ rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
134
+ rename_keys.append(
135
+ (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
136
+ )
137
+ # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
138
+ rename_keys.append(
139
+ (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
140
+ )
141
+ rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
142
+ rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
143
+ rename_keys.append(
144
+ (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
145
+ )
146
+
147
+ # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
148
+ # for conditional DETR, also convert reference point head and query scale MLP
149
+ rename_keys.extend(
150
+ [
151
+ ("input_proj.weight", "input_projection.weight"),
152
+ ("input_proj.bias", "input_projection.bias"),
153
+ ("query_embed.weight", "query_position_embeddings.weight"),
154
+ ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
155
+ ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
156
+ ("class_embed.weight", "class_labels_classifier.weight"),
157
+ ("class_embed.bias", "class_labels_classifier.bias"),
158
+ ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
159
+ ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
160
+ ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
161
+ ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
162
+ ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
163
+ ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
164
+ ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
165
+ ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
166
+ ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
167
+ ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
168
+ ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
169
+ ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
170
+ ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
171
+ ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
172
+ ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
173
+ ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
174
+ ]
175
+ )
176
+
177
+
178
+ def rename_key(state_dict, old, new):
179
+ val = state_dict.pop(old)
180
+ state_dict[new] = val
181
+
182
+
183
+ def rename_backbone_keys(state_dict):
184
+ new_state_dict = OrderedDict()
185
+ for key, value in state_dict.items():
186
+ if "backbone.0.body" in key:
187
+ new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
188
+ new_state_dict[new_key] = value
189
+ else:
190
+ new_state_dict[key] = value
191
+
192
+ return new_state_dict
193
+
194
+
195
+ def read_in_q_k_v(state_dict, is_panoptic=False):
196
+ prefix = ""
197
+ if is_panoptic:
198
+ prefix = "conditional_detr."
199
+
200
+ # first: transformer encoder
201
+ for i in range(6):
202
+ # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
203
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
204
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
205
+ # next, add query, keys and values (in that order) to the state dict
206
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
207
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
208
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
209
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
210
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
211
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
212
+
213
+
214
+ # We will verify our results on an image of cute cats
215
+ def prepare_img():
216
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
217
+ im = Image.open(requests.get(url, stream=True).raw)
218
+
219
+ return im
220
+
221
+
222
+ @torch.no_grad()
223
+ def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
224
+ """
225
+ Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure.
226
+ """
227
+
228
+ # load default config
229
+ config = ConditionalDetrConfig()
230
+ # set backbone and dilation attributes
231
+ if "resnet101" in model_name:
232
+ config.backbone = "resnet101"
233
+ if "dc5" in model_name:
234
+ config.dilation = True
235
+ is_panoptic = "panoptic" in model_name
236
+ if is_panoptic:
237
+ config.num_labels = 250
238
+ else:
239
+ config.num_labels = 91
240
+ repo_id = "huggingface/label-files"
241
+ filename = "coco-detection-id2label.json"
242
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
243
+ id2label = {int(k): v for k, v in id2label.items()}
244
+ config.id2label = id2label
245
+ config.label2id = {v: k for k, v in id2label.items()}
246
+
247
+ # load image processor
248
+ format = "coco_panoptic" if is_panoptic else "coco_detection"
249
+ image_processor = ConditionalDetrImageProcessor(format=format)
250
+
251
+ # prepare image
252
+ img = prepare_img()
253
+ encoding = image_processor(images=img, return_tensors="pt")
254
+ pixel_values = encoding["pixel_values"]
255
+
256
+ logger.info(f"Converting model {model_name}...")
257
+
258
+ # load original model from torch hub
259
+ conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
260
+ state_dict = conditional_detr.state_dict()
261
+ # rename keys
262
+ for src, dest in rename_keys:
263
+ if is_panoptic:
264
+ src = "conditional_detr." + src
265
+ rename_key(state_dict, src, dest)
266
+ state_dict = rename_backbone_keys(state_dict)
267
+ # query, key and value matrices need special treatment
268
+ read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
269
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
270
+ prefix = "conditional_detr.model." if is_panoptic else "model."
271
+ for key in state_dict.copy().keys():
272
+ if is_panoptic:
273
+ if (
274
+ key.startswith("conditional_detr")
275
+ and not key.startswith("class_labels_classifier")
276
+ and not key.startswith("bbox_predictor")
277
+ ):
278
+ val = state_dict.pop(key)
279
+ state_dict["conditional_detr.model" + key[4:]] = val
280
+ elif "class_labels_classifier" in key or "bbox_predictor" in key:
281
+ val = state_dict.pop(key)
282
+ state_dict["conditional_detr." + key] = val
283
+ elif key.startswith("bbox_attention") or key.startswith("mask_head"):
284
+ continue
285
+ else:
286
+ val = state_dict.pop(key)
287
+ state_dict[prefix + key] = val
288
+ else:
289
+ if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
290
+ val = state_dict.pop(key)
291
+ state_dict[prefix + key] = val
292
+ # finally, create HuggingFace model and load state dict
293
+ model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
294
+ model.load_state_dict(state_dict)
295
+ model.eval()
296
+ model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
297
+ # verify our conversion
298
+ original_outputs = conditional_detr(pixel_values)
299
+ outputs = model(pixel_values)
300
+ assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
301
+ assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
302
+ if is_panoptic:
303
+ assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
304
+
305
+ # Save model and image processor
306
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
307
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
308
+ model.save_pretrained(pytorch_dump_folder_path)
309
+ image_processor.save_pretrained(pytorch_dump_folder_path)
310
+
311
+
312
+ if __name__ == "__main__":
313
+ parser = argparse.ArgumentParser()
314
+
315
+ parser.add_argument(
316
+ "--model_name",
317
+ default="conditional_detr_resnet50",
318
+ type=str,
319
+ help="Name of the CONDITIONAL_DETR model you'd like to convert.",
320
+ )
321
+ parser.add_argument(
322
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
323
+ )
324
+ args = parser.parse_args()
325
+ convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
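
For reference, the converter above is driven by the `argparse` block at the bottom of the script; the sketch below calls the same function directly from Python (editorial example: the output path is a placeholder, and the run needs torch, timm, network access for `torch.hub`, and Hub credentials, since the script calls `push_to_hub`).

```python
# Editorial sketch of invoking the converter shown above; the __main__ block
# does the same thing via argparse.
from convert_conditional_detr_original_pytorch_checkpoint_to_pytorch import (
    convert_conditional_detr_checkpoint,
)

convert_conditional_detr_checkpoint(
    model_name="conditional_detr_resnet50",
    pytorch_dump_folder_path="./conditional_detr_resnet50",  # placeholder path
)
```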
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/feature_extraction_conditional_detr.py ADDED
@@ -0,0 +1,43 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Feature extractor class for Conditional DETR."""
+
+ import warnings
+
+ from ...image_transforms import rgb_to_id as _rgb_to_id
+ from ...utils import logging
+ from .image_processing_conditional_detr import ConditionalDetrImageProcessor
+
+
+ logger = logging.get_logger(__name__)
+
+
+ def rgb_to_id(x):
+     warnings.warn(
+         "rgb_to_id has moved and will not be importable from this module from v5. "
+         "Please import from transformers.image_transforms instead.",
+         FutureWarning,
+     )
+     return _rgb_to_id(x)
+
+
+ class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor):
+     def __init__(self, *args, **kwargs) -> None:
+         warnings.warn(
+             "The class ConditionalDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+             " Please use ConditionalDetrImageProcessor instead.",
+             FutureWarning,
+         )
+         super().__init__(*args, **kwargs)
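
The shim above exists only for backward compatibility: constructing `ConditionalDetrFeatureExtractor` emits a `FutureWarning` and otherwise behaves exactly like `ConditionalDetrImageProcessor`. A small sketch (editorial example, assuming the vision extra is installed):

```python
import warnings

from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ConditionalDetrFeatureExtractor()  # deprecated alias

assert any(issubclass(w.category, FutureWarning) for w in caught)
assert isinstance(extractor, ConditionalDetrImageProcessor)  # same processor underneath
```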
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/image_processing_conditional_detr.py ADDED
@@ -0,0 +1,1777 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Conditional DETR."""
16
+
17
+ import io
18
+ import pathlib
19
+ from collections import defaultdict
20
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...feature_extraction_utils import BatchFeature
25
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
26
+ from ...image_transforms import (
27
+ PaddingMode,
28
+ center_to_corners_format,
29
+ corners_to_center_format,
30
+ id_to_rgb,
31
+ pad,
32
+ rescale,
33
+ resize,
34
+ rgb_to_id,
35
+ to_channel_dimension_format,
36
+ )
37
+ from ...image_utils import (
38
+ IMAGENET_DEFAULT_MEAN,
39
+ IMAGENET_DEFAULT_STD,
40
+ AnnotationFormat,
41
+ AnnotationType,
42
+ ChannelDimension,
43
+ ImageInput,
44
+ PILImageResampling,
45
+ get_image_size,
46
+ infer_channel_dimension_format,
47
+ is_scaled_image,
48
+ make_list_of_images,
49
+ to_numpy_array,
50
+ valid_images,
51
+ validate_annotations,
52
+ validate_kwargs,
53
+ validate_preprocess_arguments,
54
+ )
55
+ from ...utils import (
56
+ TensorType,
57
+ is_flax_available,
58
+ is_jax_tensor,
59
+ is_scipy_available,
60
+ is_tf_available,
61
+ is_tf_tensor,
62
+ is_torch_available,
63
+ is_torch_tensor,
64
+ is_vision_available,
65
+ logging,
66
+ )
67
+
68
+
69
+ if is_torch_available():
70
+ import torch
71
+ from torch import nn
72
+
73
+
74
+ if is_vision_available():
75
+ import PIL
76
+
77
+
78
+ if is_scipy_available():
79
+ import scipy.special
80
+ import scipy.stats
81
+
82
+
83
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
84
+
85
+
86
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
87
+
88
+
89
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
90
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
91
+ """
92
+ Computes the output image size given the input image size and the desired output size.
93
+
94
+ Args:
95
+ image_size (`Tuple[int, int]`):
96
+ The input image size.
97
+ size (`int`):
98
+ The desired output size.
99
+ max_size (`int`, *optional*):
100
+ The maximum allowed output size.
101
+ """
102
+ height, width = image_size
103
+ if max_size is not None:
104
+ min_original_size = float(min((height, width)))
105
+ max_original_size = float(max((height, width)))
106
+ if max_original_size / min_original_size * size > max_size:
107
+ size = int(round(max_size * min_original_size / max_original_size))
108
+
109
+ if (height <= width and height == size) or (width <= height and width == size):
110
+ return height, width
111
+
112
+ if width < height:
113
+ ow = size
114
+ oh = int(size * height / width)
115
+ else:
116
+ oh = size
117
+ ow = int(size * width / height)
118
+ return (oh, ow)
119
+
120
+
121
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
122
+ def get_resize_output_image_size(
123
+ input_image: np.ndarray,
124
+ size: Union[int, Tuple[int, int], List[int]],
125
+ max_size: Optional[int] = None,
126
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
127
+ ) -> Tuple[int, int]:
128
+ """
129
+ Computes the output image size given the input image size and the desired output size. If the desired output size
130
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
131
+ image size is computed by keeping the aspect ratio of the input image size.
132
+
133
+ Args:
134
+ input_image (`np.ndarray`):
135
+ The image to resize.
136
+ size (`int` or `Tuple[int, int]` or `List[int]`):
137
+ The desired output size.
138
+ max_size (`int`, *optional*):
139
+ The maximum allowed output size.
140
+ input_data_format (`ChannelDimension` or `str`, *optional*):
141
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
142
+ """
143
+ image_size = get_image_size(input_image, input_data_format)
144
+ if isinstance(size, (list, tuple)):
145
+ return size
146
+
147
+ return get_size_with_aspect_ratio(image_size, size, max_size)
148
+
149
+
150
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
151
+ def get_numpy_to_framework_fn(arr) -> Callable:
152
+ """
153
+ Returns a function that converts a numpy array to the framework of the input array.
154
+
155
+ Args:
156
+ arr (`np.ndarray`): The array to convert.
157
+ """
158
+ if isinstance(arr, np.ndarray):
159
+ return np.array
160
+ if is_tf_available() and is_tf_tensor(arr):
161
+ import tensorflow as tf
162
+
163
+ return tf.convert_to_tensor
164
+ if is_torch_available() and is_torch_tensor(arr):
165
+ import torch
166
+
167
+ return torch.tensor
168
+ if is_flax_available() and is_jax_tensor(arr):
169
+ import jax.numpy as jnp
170
+
171
+ return jnp.array
172
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
173
+
174
+
175
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
176
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
177
+ """
178
+ Squeezes an array, but only if the axis specified has dim 1.
179
+ """
180
+ if axis is None:
181
+ return arr.squeeze()
182
+
183
+ try:
184
+ return arr.squeeze(axis=axis)
185
+ except ValueError:
186
+ return arr
187
+
188
+
189
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
190
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
191
+ image_height, image_width = image_size
192
+ norm_annotation = {}
193
+ for key, value in annotation.items():
194
+ if key == "boxes":
195
+ boxes = value
196
+ boxes = corners_to_center_format(boxes)
197
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
198
+ norm_annotation[key] = boxes
199
+ else:
200
+ norm_annotation[key] = value
201
+ return norm_annotation
202
+
203
+
204
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
205
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
206
+ """
207
+ Return the maximum value across all indices of an iterable of values.
208
+ """
209
+ return [max(values_i) for values_i in zip(*values)]
210
+
211
+
212
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
213
+ def get_max_height_width(
214
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
215
+ ) -> List[int]:
216
+ """
217
+ Get the maximum height and width across all images in a batch.
218
+ """
219
+ if input_data_format is None:
220
+ input_data_format = infer_channel_dimension_format(images[0])
221
+
222
+ if input_data_format == ChannelDimension.FIRST:
223
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
224
+ elif input_data_format == ChannelDimension.LAST:
225
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
226
+ else:
227
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
228
+ return (max_height, max_width)
229
+
230
+
231
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
232
+ def make_pixel_mask(
233
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
234
+ ) -> np.ndarray:
235
+ """
236
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
237
+
238
+ Args:
239
+ image (`np.ndarray`):
240
+ Image to make the pixel mask for.
241
+ output_size (`Tuple[int, int]`):
242
+ Output size of the mask.
243
+ """
244
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
245
+ mask = np.zeros(output_size, dtype=np.int64)
246
+ mask[:input_height, :input_width] = 1
247
+ return mask
248
+
249
+
250
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
251
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
252
+ """
253
+ Convert a COCO polygon annotation to a mask.
254
+
255
+ Args:
256
+ segmentations (`List[List[float]]`):
257
+ List of polygons, each polygon represented by a list of x-y coordinates.
258
+ height (`int`):
259
+ Height of the mask.
260
+ width (`int`):
261
+ Width of the mask.
262
+ """
263
+ try:
264
+ from pycocotools import mask as coco_mask
265
+ except ImportError:
266
+ raise ImportError("Pycocotools is not installed in your environment.")
267
+
268
+ masks = []
269
+ for polygons in segmentations:
270
+ rles = coco_mask.frPyObjects(polygons, height, width)
271
+ mask = coco_mask.decode(rles)
272
+ if len(mask.shape) < 3:
273
+ mask = mask[..., None]
274
+ mask = np.asarray(mask, dtype=np.uint8)
275
+ mask = np.any(mask, axis=2)
276
+ masks.append(mask)
277
+ if masks:
278
+ masks = np.stack(masks, axis=0)
279
+ else:
280
+ masks = np.zeros((0, height, width), dtype=np.uint8)
281
+
282
+ return masks
283
+
284
+
285
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->ConditionalDetr
286
+ def prepare_coco_detection_annotation(
287
+ image,
288
+ target,
289
+ return_segmentation_masks: bool = False,
290
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
291
+ ):
292
+ """
293
+ Convert the target in COCO format into the format expected by ConditionalDetr.
294
+ """
295
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
296
+
297
+ image_id = target["image_id"]
298
+ image_id = np.asarray([image_id], dtype=np.int64)
299
+
300
+ # Get all COCO annotations for the given image.
301
+ annotations = target["annotations"]
302
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
303
+
304
+ classes = [obj["category_id"] for obj in annotations]
305
+ classes = np.asarray(classes, dtype=np.int64)
306
+
307
+ # for conversion to coco api
308
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
309
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
310
+
311
+ boxes = [obj["bbox"] for obj in annotations]
312
+ # guard against no boxes via resizing
313
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
314
+ boxes[:, 2:] += boxes[:, :2]
315
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
316
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
317
+
318
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
319
+
320
+ new_target = {}
321
+ new_target["image_id"] = image_id
322
+ new_target["class_labels"] = classes[keep]
323
+ new_target["boxes"] = boxes[keep]
324
+ new_target["area"] = area[keep]
325
+ new_target["iscrowd"] = iscrowd[keep]
326
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
327
+
328
+ if annotations and "keypoints" in annotations[0]:
329
+ keypoints = [obj["keypoints"] for obj in annotations]
330
+ # Converting the filtered keypoints list to a numpy array
331
+ keypoints = np.asarray(keypoints, dtype=np.float32)
332
+ # Apply the keep mask here to filter the relevant annotations
333
+ keypoints = keypoints[keep]
334
+ num_keypoints = keypoints.shape[0]
335
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
336
+ new_target["keypoints"] = keypoints
337
+
338
+ if return_segmentation_masks:
339
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
340
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
341
+ new_target["masks"] = masks[keep]
342
+
343
+ return new_target
344
+
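+ # Example for `prepare_coco_detection_annotation` above (illustrative sketch): given a target such as
+ # {"image_id": 42, "annotations": [{"bbox": [10, 20, 30, 40], "category_id": 3, "area": 1200.0, "iscrowd": 0}]}
+ # the returned dict holds numpy arrays, e.g. boxes [[10., 20., 40., 60.]] (corner format, clipped to the image),
+ # class_labels [3], area [1200.], iscrowd [0], image_id [42] and orig_size [image_height, image_width].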
345
+
346
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
347
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
348
+ """
349
+ Compute the bounding boxes around the provided panoptic segmentation masks.
350
+
351
+ Args:
352
+ masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks
353
+
354
+ Returns:
355
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
356
+ """
357
+ if masks.size == 0:
358
+ return np.zeros((0, 4))
359
+
360
+ h, w = masks.shape[-2:]
361
+ y = np.arange(0, h, dtype=np.float32)
362
+ x = np.arange(0, w, dtype=np.float32)
363
+ # see https://github.com/pytorch/pytorch/issues/50276
364
+ y, x = np.meshgrid(y, x, indexing="ij")
365
+
366
+ x_mask = masks * np.expand_dims(x, axis=0)
367
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
368
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
369
+ x_min = x.filled(fill_value=1e8)
370
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
371
+
372
+ y_mask = masks * np.expand_dims(y, axis=0)
373
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
374
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
375
+ y_min = y.filled(fill_value=1e8)
376
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
377
+
378
+ return np.stack([x_min, y_min, x_max, y_max], 1)
379
+
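+ # Example for `masks_to_boxes` above (illustrative sketch): a single 4x4 mask that is 1 only on rows 1-2
+ # and columns 1-2 produces the box [1., 1., 2., 2.] in (x_min, y_min, x_max, y_max) order.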
380
+
381
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->ConditionalDetr
382
+ def prepare_coco_panoptic_annotation(
383
+ image: np.ndarray,
384
+ target: Dict,
385
+ masks_path: Union[str, pathlib.Path],
386
+ return_masks: bool = True,
387
+ input_data_format: Union[ChannelDimension, str] = None,
388
+ ) -> Dict:
389
+ """
390
+ Prepare a coco panoptic annotation for ConditionalDetr.
391
+ """
392
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
393
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
394
+
395
+ new_target = {}
396
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
397
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
398
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
399
+
400
+ if "segments_info" in target:
401
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
402
+ masks = rgb_to_id(masks)
403
+
404
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
405
+ masks = masks == ids[:, None, None]
406
+ masks = masks.astype(np.uint8)
407
+ if return_masks:
408
+ new_target["masks"] = masks
409
+ new_target["boxes"] = masks_to_boxes(masks)
410
+ new_target["class_labels"] = np.array(
411
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
412
+ )
413
+ new_target["iscrowd"] = np.asarray(
414
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
415
+ )
416
+ new_target["area"] = np.asarray(
417
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
418
+ )
419
+
420
+ return new_target
421
+
422
+
423
+ # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
424
+ def get_segmentation_image(
425
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
426
+ ):
427
+ h, w = input_size
428
+ final_h, final_w = target_size
429
+
430
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
431
+
432
+ if m_id.shape[-1] == 0:
433
+ # We didn't detect any mask :(
434
+ m_id = np.zeros((h, w), dtype=np.int64)
435
+ else:
436
+ m_id = m_id.argmax(-1).reshape(h, w)
437
+
438
+ if deduplicate:
439
+ # Merge the masks corresponding to the same stuff class
440
+ for equiv in stuff_equiv_classes.values():
441
+ for eq_id in equiv:
442
+ m_id[m_id == eq_id] = equiv[0]
443
+
444
+ seg_img = id_to_rgb(m_id)
445
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
446
+ return seg_img
447
+
448
+
449
+ # Copied from transformers.models.detr.image_processing_detr.get_mask_area
450
+ def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
451
+ final_h, final_w = target_size
452
+ np_seg_img = seg_img.astype(np.uint8)
453
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
454
+ m_id = rgb_to_id(np_seg_img)
455
+ area = [(m_id == i).sum() for i in range(n_classes)]
456
+ return area
457
+
458
+
459
+ # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
460
+ def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
461
+ probs = scipy.special.softmax(logits, axis=-1)
462
+ labels = probs.argmax(-1, keepdims=True)
463
+ scores = np.take_along_axis(probs, labels, axis=-1)
464
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
465
+ return scores, labels
466
+
467
+
468
+ # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample with DetrForSegmentation->ConditionalDetrForSegmentation
469
+ def post_process_panoptic_sample(
470
+ out_logits: np.ndarray,
471
+ masks: np.ndarray,
472
+ boxes: np.ndarray,
473
+ processed_size: Tuple[int, int],
474
+ target_size: Tuple[int, int],
475
+ is_thing_map: Dict,
476
+ threshold=0.85,
477
+ ) -> Dict:
478
+ """
479
+ Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single sample.
480
+
481
+ Args:
482
+ out_logits (`torch.Tensor`):
483
+ The logits for this sample.
484
+ masks (`torch.Tensor`):
485
+ The predicted segmentation masks for this sample.
486
+ boxes (`torch.Tensor`):
487
+ The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
+ width, height)` with values in `[0, 1]`, relative to the size of the image (disregarding padding).
489
+ processed_size (`Tuple[int, int]`):
490
+ The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
491
+ after data augmentation but before batching.
492
+ target_size (`Tuple[int, int]`):
493
+ The target size of the image, `(height, width)` corresponding to the requested final size of the
494
+ prediction.
495
+ is_thing_map (`Dict`):
496
+ A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
497
+ threshold (`float`, *optional*, defaults to 0.85):
498
+ The threshold used to binarize the segmentation masks.
499
+ """
500
+ # we filter empty queries and detection below threshold
501
+ scores, labels = score_labels_from_class_probabilities(out_logits)
502
+ keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
503
+
504
+ cur_scores = scores[keep]
505
+ cur_classes = labels[keep]
506
+ cur_boxes = center_to_corners_format(boxes[keep])
507
+
508
+ if len(cur_boxes) != len(cur_classes):
509
+ raise ValueError("Not as many boxes as there are classes")
510
+
511
+ cur_masks = masks[keep]
512
+ cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
513
+ cur_masks = safe_squeeze(cur_masks, 1)
514
+ b, h, w = cur_masks.shape
515
+
516
+ # It may be that we have several predicted masks for the same stuff class.
517
+ # In the following, we track the list of mask ids for each stuff class (they are merged later on)
518
+ cur_masks = cur_masks.reshape(b, -1)
519
+ stuff_equiv_classes = defaultdict(list)
520
+ for k, label in enumerate(cur_classes):
521
+ if not is_thing_map[label]:
522
+ stuff_equiv_classes[label].append(k)
523
+
524
+ seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
525
+ area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
526
+
527
+ # We filter out any mask that is too small
528
+ if cur_classes.size > 0:
+ # We now filter out empty masks as long as we find some
530
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
531
+ while filtered_small.any():
532
+ cur_masks = cur_masks[~filtered_small]
533
+ cur_scores = cur_scores[~filtered_small]
534
+ cur_classes = cur_classes[~filtered_small]
535
+ seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
536
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
537
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
538
+ else:
539
+ cur_classes = np.ones((1, 1), dtype=np.int64)
540
+
541
+ segments_info = [
542
+ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
543
+ for i, (cat, a) in enumerate(zip(cur_classes, area))
544
+ ]
545
+ del cur_classes
546
+
547
+ with io.BytesIO() as out:
548
+ PIL.Image.fromarray(seg_img).save(out, format="PNG")
549
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
550
+
551
+ return predictions
552
+
553
+
554
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
555
+ def resize_annotation(
556
+ annotation: Dict[str, Any],
557
+ orig_size: Tuple[int, int],
558
+ target_size: Tuple[int, int],
559
+ threshold: float = 0.5,
560
+ resample: PILImageResampling = PILImageResampling.NEAREST,
561
+ ):
562
+ """
563
+ Resizes an annotation to a target size.
564
+
565
+ Args:
566
+ annotation (`Dict[str, Any]`):
567
+ The annotation dictionary.
568
+ orig_size (`Tuple[int, int]`):
569
+ The original size of the input image.
570
+ target_size (`Tuple[int, int]`):
571
+ The target size of the image, as returned by the preprocessing `resize` step.
572
+ threshold (`float`, *optional*, defaults to 0.5):
573
+ The threshold used to binarize the segmentation masks.
574
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
575
+ The resampling filter to use when resizing the masks.
576
+ """
577
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
578
+ ratio_height, ratio_width = ratios
579
+
580
+ new_annotation = {}
581
+ new_annotation["size"] = target_size
582
+
583
+ for key, value in annotation.items():
584
+ if key == "boxes":
585
+ boxes = value
586
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
587
+ new_annotation["boxes"] = scaled_boxes
588
+ elif key == "area":
589
+ area = value
590
+ scaled_area = area * (ratio_width * ratio_height)
591
+ new_annotation["area"] = scaled_area
592
+ elif key == "masks":
593
+ masks = value[:, None]
594
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
595
+ masks = masks.astype(np.float32)
596
+ masks = masks[:, 0] > threshold
597
+ new_annotation["masks"] = masks
598
+ elif key == "size":
599
+ new_annotation["size"] = target_size
600
+ else:
601
+ new_annotation[key] = value
602
+
603
+ return new_annotation
604
+
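+ # Example for `resize_annotation` above (illustrative sketch): going from orig_size (480, 640) to
+ # target_size (240, 320) gives ratio_height = ratio_width = 0.5, so a box [10., 20., 40., 60.] becomes
+ # [5., 10., 20., 30.] and an area of 1200. becomes 300. (scaled by ratio_width * ratio_height = 0.25).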
605
+
606
+ # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
607
+ def binary_mask_to_rle(mask):
608
+ """
609
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
610
+
611
+ Args:
612
+ mask (`torch.Tensor` or `numpy.array`):
613
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
614
+ segment_id or class_id.
615
+ Returns:
616
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
617
+ format.
618
+ """
619
+ if is_torch_tensor(mask):
620
+ mask = mask.numpy()
621
+
622
+ pixels = mask.flatten()
623
+ pixels = np.concatenate([[0], pixels, [0]])
624
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
625
+ runs[1::2] -= runs[::2]
626
+ return list(runs)
627
+
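+ # Example for `binary_mask_to_rle` above (illustrative sketch): the 2x3 mask
+ # [[0, 1, 1],
+ #  [0, 1, 0]]
+ # flattens to [0, 1, 1, 0, 1, 0] and encodes to [2, 2, 5, 1], i.e. a run of two 1s starting at
+ # (1-indexed) pixel 2 and a run of one 1 starting at pixel 5.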
628
+
629
+ # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
630
+ def convert_segmentation_to_rle(segmentation):
631
+ """
632
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
633
+
634
+ Args:
635
+ segmentation (`torch.Tensor` or `numpy.array`):
636
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
637
+ Returns:
638
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
639
+ """
640
+ segment_ids = torch.unique(segmentation)
641
+
642
+ run_length_encodings = []
643
+ for idx in segment_ids:
644
+ mask = torch.where(segmentation == idx, 1, 0)
645
+ rle = binary_mask_to_rle(mask)
646
+ run_length_encodings.append(rle)
647
+
648
+ return run_length_encodings
649
+
650
+
651
+ # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
652
+ def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
653
+ """
654
+ Binarize the given masks using `object_mask_threshold` and return the associated values of `masks`, `scores` and
+ `labels`.
656
+
657
+ Args:
658
+ masks (`torch.Tensor`):
659
+ A tensor of shape `(num_queries, height, width)`.
660
+ scores (`torch.Tensor`):
661
+ A tensor of shape `(num_queries)`.
662
+ labels (`torch.Tensor`):
663
+ A tensor of shape `(num_queries)`.
664
+ object_mask_threshold (`float`):
665
+ A number between 0 and 1 used to binarize the masks.
666
+ Raises:
667
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
668
+ Returns:
669
+ `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` for the predictions
+ whose score is above `object_mask_threshold`.
671
+ """
672
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
673
+ raise ValueError("mask, scores and labels must have the same shape!")
674
+
675
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
676
+
677
+ return masks[to_keep], scores[to_keep], labels[to_keep]
678
+
679
+
680
+ # Copied from transformers.models.detr.image_processing_detr.check_segment_validity
681
+ def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
682
+ # Get the mask associated with the k class
683
+ mask_k = mask_labels == k
684
+ mask_k_area = mask_k.sum()
685
+
686
+ # Compute the area of all the stuff in query k
687
+ original_area = (mask_probs[k] >= mask_threshold).sum()
688
+ mask_exists = mask_k_area > 0 and original_area > 0
689
+
690
+ # Eliminate disconnected tiny segments
691
+ if mask_exists:
692
+ area_ratio = mask_k_area / original_area
693
+ if not area_ratio.item() > overlap_mask_area_threshold:
694
+ mask_exists = False
695
+
696
+ return mask_exists, mask_k
697
+
698
+
699
+ # Copied from transformers.models.detr.image_processing_detr.compute_segments
700
+ def compute_segments(
701
+ mask_probs,
702
+ pred_scores,
703
+ pred_labels,
704
+ mask_threshold: float = 0.5,
705
+ overlap_mask_area_threshold: float = 0.8,
706
+ label_ids_to_fuse: Optional[Set[int]] = None,
707
+ target_size: Tuple[int, int] = None,
708
+ ):
709
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
710
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
711
+
712
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
713
+ segments: List[Dict] = []
714
+
715
+ if target_size is not None:
716
+ mask_probs = nn.functional.interpolate(
717
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
718
+ )[0]
719
+
720
+ current_segment_id = 0
721
+
722
+ # Weigh each mask by its prediction score
723
+ mask_probs *= pred_scores.view(-1, 1, 1)
724
+ mask_labels = mask_probs.argmax(0) # [height, width]
725
+
726
+ # Keep track of instances of each class
727
+ stuff_memory_list: Dict[str, int] = {}
728
+ for k in range(pred_labels.shape[0]):
729
+ pred_class = pred_labels[k].item()
730
+ should_fuse = pred_class in label_ids_to_fuse
731
+
732
+ # Check if the mask exists and is large enough to be a segment
733
+ mask_exists, mask_k = check_segment_validity(
734
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
735
+ )
736
+
737
+ if mask_exists:
738
+ if pred_class in stuff_memory_list:
739
+ current_segment_id = stuff_memory_list[pred_class]
740
+ else:
741
+ current_segment_id += 1
742
+
743
+ # Add current object segment to final segmentation map
744
+ segmentation[mask_k] = current_segment_id
745
+ segment_score = round(pred_scores[k].item(), 6)
746
+ segments.append(
747
+ {
748
+ "id": current_segment_id,
749
+ "label_id": pred_class,
750
+ "was_fused": should_fuse,
751
+ "score": segment_score,
752
+ }
753
+ )
754
+ if should_fuse:
755
+ stuff_memory_list[pred_class] = current_segment_id
756
+
757
+ return segmentation, segments
758
+
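+ # Example for `compute_segments` above (illustrative sketch): with mask_probs of shape
+ # (num_queries, height, width) and pred_scores / pred_labels of shape (num_queries,), the call returns an
+ # int32 `segmentation` map of shape (height, width) (or `target_size`) whose values are segment ids, plus a
+ # `segments` list such as [{"id": 1, "label_id": 17, "was_fused": False, "score": 0.98}, ...].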
759
+
760
+ class ConditionalDetrImageProcessor(BaseImageProcessor):
761
+ r"""
762
+ Constructs a Conditional Detr image processor.
763
+
764
+ Args:
765
+ format (`str`, *optional*, defaults to `"coco_detection"`):
766
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
767
+ do_resize (`bool`, *optional*, defaults to `True`):
768
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
769
+ overridden by the `do_resize` parameter in the `preprocess` method.
770
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
771
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
772
+ the `preprocess` method.
773
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
774
+ Resampling filter to use if resizing the image.
775
+ do_rescale (`bool`, *optional*, defaults to `True`):
776
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
777
+ `do_rescale` parameter in the `preprocess` method.
778
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
779
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
780
+ `preprocess` method.
781
+ do_normalize (`bool`, *optional*, defaults to `True`):
782
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
783
+ `preprocess` method.
784
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
785
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
786
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
787
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
788
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
789
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
790
+ do_convert_annotations (`bool`, *optional*, defaults to `True`):
791
+ Controls whether to convert the annotations to the format expected by the DETR model. Converts the
792
+ bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
793
+ Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
794
+ do_pad (`bool`, *optional*, defaults to `True`):
795
+ Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
796
+ method. If `True` will pad the images in the batch to the largest height and width in the batch.
797
+ Padding will be applied to the bottom and right of the image with zeros.
798
+ """
799
+
800
+ model_input_names = ["pixel_values", "pixel_mask"]
801
+
802
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
803
+ def __init__(
804
+ self,
805
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
806
+ do_resize: bool = True,
807
+ size: Dict[str, int] = None,
808
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
809
+ do_rescale: bool = True,
810
+ rescale_factor: Union[int, float] = 1 / 255,
811
+ do_normalize: bool = True,
812
+ image_mean: Union[float, List[float]] = None,
813
+ image_std: Union[float, List[float]] = None,
814
+ do_convert_annotations: Optional[bool] = None,
815
+ do_pad: bool = True,
816
+ **kwargs,
817
+ ) -> None:
818
+ if "pad_and_return_pixel_mask" in kwargs:
819
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
820
+
821
+ if "max_size" in kwargs:
822
+ logger.warning_once(
823
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
824
+ "Please specify in `size['longest_edge'] instead`.",
825
+ )
826
+ max_size = kwargs.pop("max_size")
827
+ else:
828
+ max_size = None if size is None else 1333
829
+
830
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
831
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
832
+
833
+ # Backwards compatibility
834
+ if do_convert_annotations is None:
835
+ do_convert_annotations = do_normalize
836
+
837
+ super().__init__(**kwargs)
838
+ self.format = format
839
+ self.do_resize = do_resize
840
+ self.size = size
841
+ self.resample = resample
842
+ self.do_rescale = do_rescale
843
+ self.rescale_factor = rescale_factor
844
+ self.do_normalize = do_normalize
845
+ self.do_convert_annotations = do_convert_annotations
846
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
847
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
848
+ self.do_pad = do_pad
849
+ self._valid_processor_keys = [
850
+ "images",
851
+ "annotations",
852
+ "return_segmentation_masks",
853
+ "masks_path",
854
+ "do_resize",
855
+ "size",
856
+ "resample",
857
+ "do_rescale",
858
+ "rescale_factor",
859
+ "do_normalize",
860
+ "do_convert_annotations",
861
+ "image_mean",
862
+ "image_std",
863
+ "do_pad",
864
+ "format",
865
+ "return_tensors",
866
+ "data_format",
867
+ "input_data_format",
868
+ ]
869
+
870
+ @classmethod
871
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->ConditionalDetr
872
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
873
+ """
874
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
875
+ created using from_dict and kwargs e.g. `ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600,
876
+ max_size=800)`
877
+ """
878
+ image_processor_dict = image_processor_dict.copy()
879
+ if "max_size" in kwargs:
880
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
881
+ if "pad_and_return_pixel_mask" in kwargs:
882
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
883
+ return super().from_dict(image_processor_dict, **kwargs)
884
+
885
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->ConditionalDetr
886
+ def prepare_annotation(
887
+ self,
888
+ image: np.ndarray,
889
+ target: Dict,
890
+ format: Optional[AnnotationFormat] = None,
891
+ return_segmentation_masks: bool = None,
892
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
893
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
894
+ ) -> Dict:
895
+ """
896
+ Prepare an annotation for feeding into ConditionalDetr model.
897
+ """
898
+ format = format if format is not None else self.format
899
+
900
+ if format == AnnotationFormat.COCO_DETECTION:
901
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
902
+ target = prepare_coco_detection_annotation(
903
+ image, target, return_segmentation_masks, input_data_format=input_data_format
904
+ )
905
+ elif format == AnnotationFormat.COCO_PANOPTIC:
906
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
907
+ target = prepare_coco_panoptic_annotation(
908
+ image,
909
+ target,
910
+ masks_path=masks_path,
911
+ return_masks=return_segmentation_masks,
912
+ input_data_format=input_data_format,
913
+ )
914
+ else:
915
+ raise ValueError(f"Format {format} is not supported.")
916
+ return target
917
+
918
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
919
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
920
+ logger.warning_once(
921
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
922
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
923
+ "does not return the image anymore.",
924
+ )
925
+ target = self.prepare_annotation(image, target, format=self.format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path)
926
+ return image, target
927
+
928
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
929
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
930
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
931
+ return convert_coco_poly_to_mask(*args, **kwargs)
932
+
933
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection with DETR->ConditionalDetr
934
+ def prepare_coco_detection(self, *args, **kwargs):
935
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
936
+ return prepare_coco_detection_annotation(*args, **kwargs)
937
+
938
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
939
+ def prepare_coco_panoptic(self, *args, **kwargs):
940
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
941
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
942
+
943
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
944
+ def resize(
945
+ self,
946
+ image: np.ndarray,
947
+ size: Dict[str, int],
948
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
949
+ data_format: Optional[ChannelDimension] = None,
950
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
951
+ **kwargs,
952
+ ) -> np.ndarray:
953
+ """
954
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
955
+ int, smaller edge of the image will be matched to this number.
956
+
957
+ Args:
958
+ image (`np.ndarray`):
959
+ Image to resize.
960
+ size (`Dict[str, int]`):
961
+ Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or
962
+ `height` and `width`.
963
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
964
+ Resampling filter to use if resizing the image.
965
+ data_format (`str` or `ChannelDimension`, *optional*):
966
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
967
+ image is used.
968
+ input_data_format (`ChannelDimension` or `str`, *optional*):
969
+ The channel dimension format of the input image. If not provided, it will be inferred.
970
+ """
971
+ if "max_size" in kwargs:
972
+ logger.warning_once(
973
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
974
+ "Please specify in `size['longest_edge'] instead`.",
975
+ )
976
+ max_size = kwargs.pop("max_size")
977
+ else:
978
+ max_size = None
979
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
980
+ if "shortest_edge" in size and "longest_edge" in size:
981
+ size = get_resize_output_image_size(
982
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
983
+ )
984
+ elif "height" in size and "width" in size:
985
+ size = (size["height"], size["width"])
986
+ else:
987
+ raise ValueError(
988
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
989
+ f" {size.keys()}."
990
+ )
991
+ image = resize(
992
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
993
+ )
994
+ return image
995
+
996
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
997
+ def resize_annotation(
998
+ self,
999
+ annotation,
1000
+ orig_size,
1001
+ size,
1002
+ resample: PILImageResampling = PILImageResampling.NEAREST,
1003
+ ) -> Dict:
1004
+ """
1005
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
1006
+ to this number.
1007
+ """
1008
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
1009
+
1010
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
1011
+ def rescale(
1012
+ self,
1013
+ image: np.ndarray,
1014
+ rescale_factor: float,
1015
+ data_format: Optional[Union[str, ChannelDimension]] = None,
1016
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1017
+ ) -> np.ndarray:
1018
+ """
1019
+ Rescale the image by the given factor. image = image * rescale_factor.
1020
+
1021
+ Args:
1022
+ image (`np.ndarray`):
1023
+ Image to rescale.
1024
+ rescale_factor (`float`):
1025
+ The value to use for rescaling.
1026
+ data_format (`str` or `ChannelDimension`, *optional*):
1027
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
1028
+ image is used. Can be one of:
1029
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1030
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1031
+ input_data_format (`str` or `ChannelDimension`, *optional*):
1032
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
1033
+ one of:
1034
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1035
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1036
+ """
1037
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
1038
+
1039
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
1040
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
1041
+ """
1042
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
1043
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
1044
+ """
1045
+ return normalize_annotation(annotation, image_size=image_size)
1046
+
1047
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
1048
+ def _update_annotation_for_padded_image(
1049
+ self,
1050
+ annotation: Dict,
1051
+ input_image_size: Tuple[int, int],
1052
+ output_image_size: Tuple[int, int],
1053
+ padding,
1054
+ update_bboxes,
1055
+ ) -> Dict:
1056
+ """
1057
+ Update the annotation for a padded image.
1058
+ """
1059
+ new_annotation = {}
1060
+ new_annotation["size"] = output_image_size
1061
+
1062
+ for key, value in annotation.items():
1063
+ if key == "masks":
1064
+ masks = value
1065
+ masks = pad(
1066
+ masks,
1067
+ padding,
1068
+ mode=PaddingMode.CONSTANT,
1069
+ constant_values=0,
1070
+ input_data_format=ChannelDimension.FIRST,
1071
+ )
1072
+ masks = safe_squeeze(masks, 1)
1073
+ new_annotation["masks"] = masks
1074
+ elif key == "boxes" and update_bboxes:
1075
+ boxes = value
1076
+ boxes *= np.asarray(
1077
+ [
1078
+ input_image_size[1] / output_image_size[1],
1079
+ input_image_size[0] / output_image_size[0],
1080
+ input_image_size[1] / output_image_size[1],
1081
+ input_image_size[0] / output_image_size[0],
1082
+ ]
1083
+ )
1084
+ new_annotation["boxes"] = boxes
1085
+ elif key == "size":
1086
+ new_annotation["size"] = output_image_size
1087
+ else:
1088
+ new_annotation[key] = value
1089
+ return new_annotation
1090
+
1091
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
1092
+ def _pad_image(
1093
+ self,
1094
+ image: np.ndarray,
1095
+ output_size: Tuple[int, int],
1096
+ annotation: Optional[Dict[str, Any]] = None,
1097
+ constant_values: Union[float, Iterable[float]] = 0,
1098
+ data_format: Optional[ChannelDimension] = None,
1099
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1100
+ update_bboxes: bool = True,
1101
+ ) -> np.ndarray:
1102
+ """
1103
+ Pad an image with zeros to the given size.
1104
+ """
1105
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
1106
+ output_height, output_width = output_size
1107
+
1108
+ pad_bottom = output_height - input_height
1109
+ pad_right = output_width - input_width
1110
+ padding = ((0, pad_bottom), (0, pad_right))
1111
+ padded_image = pad(
1112
+ image,
1113
+ padding,
1114
+ mode=PaddingMode.CONSTANT,
1115
+ constant_values=constant_values,
1116
+ data_format=data_format,
1117
+ input_data_format=input_data_format,
1118
+ )
1119
+ if annotation is not None:
1120
+ annotation = self._update_annotation_for_padded_image(
1121
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
1122
+ )
1123
+ return padded_image, annotation
1124
+
1125
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
1126
+ def pad(
1127
+ self,
1128
+ images: List[np.ndarray],
1129
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1130
+ constant_values: Union[float, Iterable[float]] = 0,
1131
+ return_pixel_mask: bool = True,
1132
+ return_tensors: Optional[Union[str, TensorType]] = None,
1133
+ data_format: Optional[ChannelDimension] = None,
1134
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1135
+ update_bboxes: bool = True,
1136
+ ) -> BatchFeature:
1137
+ """
1138
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
1139
+ in the batch and optionally returns their corresponding pixel mask.
1140
+
1141
+ Args:
1142
+ images (List[`np.ndarray`]):
1143
+ Images to pad.
1144
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1145
+ Annotations to transform according to the padding that is applied to the images.
1146
+ constant_values (`float` or `Iterable[float]`, *optional*):
1147
+ The value to use for the padding if `mode` is `"constant"`.
1148
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
1149
+ Whether to return a pixel mask.
1150
+ return_tensors (`str` or `TensorType`, *optional*):
1151
+ The type of tensors to return. Can be one of:
1152
+ - Unset: Return a list of `np.ndarray`.
1153
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
1154
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
1155
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
1156
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
1157
+ data_format (`str` or `ChannelDimension`, *optional*):
1158
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
1159
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1160
+ The channel dimension format of the input image. If not provided, it will be inferred.
1161
+ update_bboxes (`bool`, *optional*, defaults to `True`):
1162
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
1163
+ bounding boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)`
1164
+ format, the bounding boxes will not be updated.
1165
+ """
1166
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
1167
+
1168
+ annotation_list = annotations if annotations is not None else [None] * len(images)
1169
+ padded_images = []
1170
+ padded_annotations = []
1171
+ for image, annotation in zip(images, annotation_list):
1172
+ padded_image, padded_annotation = self._pad_image(
1173
+ image,
1174
+ pad_size,
1175
+ annotation,
1176
+ constant_values=constant_values,
1177
+ data_format=data_format,
1178
+ input_data_format=input_data_format,
1179
+ update_bboxes=update_bboxes,
1180
+ )
1181
+ padded_images.append(padded_image)
1182
+ padded_annotations.append(padded_annotation)
1183
+
1184
+ data = {"pixel_values": padded_images}
1185
+
1186
+ if return_pixel_mask:
1187
+ masks = [
1188
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
1189
+ for image in images
1190
+ ]
1191
+ data["pixel_mask"] = masks
1192
+
1193
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
1194
+
1195
+ if annotations is not None:
1196
+ encoded_inputs["labels"] = [
1197
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
1198
+ ]
1199
+
1200
+ return encoded_inputs
1201
+
1202
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
1203
+ def preprocess(
1204
+ self,
1205
+ images: ImageInput,
1206
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1207
+ return_segmentation_masks: bool = None,
1208
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
1209
+ do_resize: Optional[bool] = None,
1210
+ size: Optional[Dict[str, int]] = None,
1211
+ resample=None, # PILImageResampling
1212
+ do_rescale: Optional[bool] = None,
1213
+ rescale_factor: Optional[Union[int, float]] = None,
1214
+ do_normalize: Optional[bool] = None,
1215
+ do_convert_annotations: Optional[bool] = None,
1216
+ image_mean: Optional[Union[float, List[float]]] = None,
1217
+ image_std: Optional[Union[float, List[float]]] = None,
1218
+ do_pad: Optional[bool] = None,
1219
+ format: Optional[Union[str, AnnotationFormat]] = None,
1220
+ return_tensors: Optional[Union[TensorType, str]] = None,
1221
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
1222
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1223
+ **kwargs,
1224
+ ) -> BatchFeature:
1225
+ """
1226
+ Preprocess an image or a batch of images so that it can be used by the model.
1227
+
1228
+ Args:
1229
+ images (`ImageInput`):
1230
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
1231
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
1232
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1233
+ List of annotations associated with the image or batch of images. If annotation is for object
1234
+ detection, the annotations should be a dictionary with the following keys:
1235
+ - "image_id" (`int`): The image id.
1236
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
1237
+ dictionary. An image can have no annotations, in which case the list should be empty.
1238
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
1239
+ - "image_id" (`int`): The image id.
1240
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
1241
+ An image can have no segments, in which case the list should be empty.
1242
+ - "file_name" (`str`): The file name of the image.
1243
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
1244
+ Whether to return segmentation masks.
1245
+ masks_path (`str` or `pathlib.Path`, *optional*):
1246
+ Path to the directory containing the segmentation masks.
1247
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
1248
+ Whether to resize the image.
1249
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
1250
+ Size of the image after resizing.
1251
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
1252
+ Resampling filter to use when resizing the image.
1253
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
1254
+ Whether to rescale the image.
1255
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
1256
+ Rescale factor to use when rescaling the image.
1257
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
1258
+ Whether to normalize the image.
1259
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
1260
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
1261
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
1262
+ and in relative coordinates.
1263
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
1264
+ Mean to use when normalizing the image.
1265
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
1266
+ Standard deviation to use when normalizing the image.
1267
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
1268
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
1269
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
1270
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
1271
+ Format of the annotations.
1272
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
1273
+ Type of tensors to return. If `None`, will return the list of images.
1274
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
1275
+ The channel dimension format for the output image. Can be one of:
1276
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1277
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1278
+ - Unset: Use the channel dimension format of the input image.
1279
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1280
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
1281
+ from the input image. Can be one of:
1282
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1283
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1284
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
1285
+ """
1286
+ if "pad_and_return_pixel_mask" in kwargs:
1287
+ logger.warning_once(
1288
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
1289
+ "use `do_pad` instead."
1290
+ )
1291
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
1292
+
1293
+ max_size = None
1294
+ if "max_size" in kwargs:
1295
+ logger.warning_once(
1296
+ "The `max_size` argument is deprecated and will be removed in a future version, use"
1297
+ " `size['longest_edge']` instead."
1298
+ )
1299
+ size = kwargs.pop("max_size")
1300
+
1301
+ do_resize = self.do_resize if do_resize is None else do_resize
1302
+ size = self.size if size is None else size
1303
+ size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
1304
+ resample = self.resample if resample is None else resample
1305
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
1306
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
1307
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
1308
+ image_mean = self.image_mean if image_mean is None else image_mean
1309
+ image_std = self.image_std if image_std is None else image_std
1310
+ do_convert_annotations = (
1311
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
1312
+ )
1313
+ do_pad = self.do_pad if do_pad is None else do_pad
1314
+ format = self.format if format is None else format
1315
+
1316
+ images = make_list_of_images(images)
1317
+
1318
+ if not valid_images(images):
1319
+ raise ValueError(
1320
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
1321
+ "torch.Tensor, tf.Tensor or jax.ndarray."
1322
+ )
1323
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
1324
+
1325
+ # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
1326
+ validate_preprocess_arguments(
1327
+ do_rescale=do_rescale,
1328
+ rescale_factor=rescale_factor,
1329
+ do_normalize=do_normalize,
1330
+ image_mean=image_mean,
1331
+ image_std=image_std,
1332
+ do_resize=do_resize,
1333
+ size=size,
1334
+ resample=resample,
1335
+ )
1336
+
1337
+ if annotations is not None and isinstance(annotations, dict):
1338
+ annotations = [annotations]
1339
+
1340
+ if annotations is not None and len(images) != len(annotations):
1341
+ raise ValueError(
1342
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
1343
+ )
1344
+
1345
+ format = AnnotationFormat(format)
1346
+ if annotations is not None:
1347
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
1348
+
1349
+ if (
1350
+ masks_path is not None
1351
+ and format == AnnotationFormat.COCO_PANOPTIC
1352
+ and not isinstance(masks_path, (pathlib.Path, str))
1353
+ ):
1354
+ raise ValueError(
1355
+ "The path to the directory containing the mask PNG files should be provided as a"
1356
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
1357
+ )
1358
+
1359
+ # All transformations expect numpy arrays
1360
+ images = [to_numpy_array(image) for image in images]
1361
+
1362
+ if is_scaled_image(images[0]) and do_rescale:
1363
+ logger.warning_once(
1364
+ "It looks like you are trying to rescale already rescaled images. If the input"
1365
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
1366
+ )
1367
+
1368
+ if input_data_format is None:
1369
+ # We assume that all images have the same channel dimension format.
1370
+ input_data_format = infer_channel_dimension_format(images[0])
1371
+
1372
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
1373
+ if annotations is not None:
1374
+ prepared_images = []
1375
+ prepared_annotations = []
1376
+ for image, target in zip(images, annotations):
1377
+ target = self.prepare_annotation(
1378
+ image,
1379
+ target,
1380
+ format,
1381
+ return_segmentation_masks=return_segmentation_masks,
1382
+ masks_path=masks_path,
1383
+ input_data_format=input_data_format,
1384
+ )
1385
+ prepared_images.append(image)
1386
+ prepared_annotations.append(target)
1387
+ images = prepared_images
1388
+ annotations = prepared_annotations
1389
+ del prepared_images, prepared_annotations
1390
+
1391
+ # transformations
1392
+ if do_resize:
1393
+ if annotations is not None:
1394
+ resized_images, resized_annotations = [], []
1395
+ for image, target in zip(images, annotations):
1396
+ orig_size = get_image_size(image, input_data_format)
1397
+ resized_image = self.resize(
1398
+ image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
1399
+ )
1400
+ resized_annotation = self.resize_annotation(
1401
+ target, orig_size, get_image_size(resized_image, input_data_format)
1402
+ )
1403
+ resized_images.append(resized_image)
1404
+ resized_annotations.append(resized_annotation)
1405
+ images = resized_images
1406
+ annotations = resized_annotations
1407
+ del resized_images, resized_annotations
1408
+ else:
1409
+ images = [
1410
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
1411
+ for image in images
1412
+ ]
1413
+
1414
+ if do_rescale:
1415
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
1416
+
1417
+ if do_normalize:
1418
+ images = [
1419
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
1420
+ ]
1421
+
1422
+ if do_convert_annotations and annotations is not None:
1423
+ annotations = [
1424
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
1425
+ for annotation, image in zip(annotations, images)
1426
+ ]
1427
+
1428
+ if do_pad:
1429
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
1430
+ encoded_inputs = self.pad(
1431
+ images,
1432
+ annotations=annotations,
1433
+ return_pixel_mask=True,
1434
+ data_format=data_format,
1435
+ input_data_format=input_data_format,
1436
+ update_bboxes=do_convert_annotations,
1437
+ return_tensors=return_tensors,
1438
+ )
1439
+ else:
1440
+ images = [
1441
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1442
+ for image in images
1443
+ ]
1444
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
1445
+ if annotations is not None:
1446
+ encoded_inputs["labels"] = [
1447
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1448
+ ]
1449
+
1450
+ return encoded_inputs
1451
+
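+ # Example usage of `preprocess` above (illustrative sketch; assumes `image` is a PIL image and that the
+ # checkpoint name is available):
+ #     from transformers import ConditionalDetrImageProcessor
+ #     processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
+ #     annotation = {"image_id": 0, "annotations": [{"bbox": [10, 20, 30, 40], "category_id": 1, "area": 1200.0, "iscrowd": 0}]}
+ #     inputs = processor(images=image, annotations=annotation, return_tensors="pt")
+ #     # `inputs` contains "pixel_values", "pixel_mask" and, because annotations were passed, "labels".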
1452
+ # POSTPROCESSING METHODS - TODO: add support for other frameworks
1453
+ def post_process(self, outputs, target_sizes):
1454
+ """
1455
+ Converts the output of [`ConditionalDetrForObjectDetection`] into the Pascal VOC format (xmin, ymin, xmax, ymax).
1456
+ Only supports PyTorch.
1457
+
1458
+ Args:
1459
+ outputs ([`ConditionalDetrObjectDetectionOutput`]):
1460
+ Raw outputs of the model.
1461
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
1462
+ Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
1463
+ image size (before any data augmentation). For visualization, this should be the image size after data
1464
+ augmentation, but before padding.
1465
+ Returns:
1466
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1467
+ in the batch as predicted by the model.
1468
+ """
1469
+ logger.warning_once(
1470
+ "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
1471
+ " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
1472
+ )
1473
+
1474
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1475
+
1476
+ if len(out_logits) != len(target_sizes):
1477
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
1478
+ if target_sizes.shape[1] != 2:
1479
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
1480
+
1481
+ prob = out_logits.sigmoid()
1482
+ topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)
1483
+ scores = topk_values
1484
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
1485
+ labels = topk_indexes % out_logits.shape[2]
1486
+ boxes = center_to_corners_format(out_bbox)
1487
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
1488
+
1489
+ # and from relative [0, 1] to absolute [0, height] coordinates
1490
+ img_h, img_w = target_sizes.unbind(1)
1491
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
1492
+ boxes = boxes * scale_fct[:, None, :]
1493
+
1494
+ results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
1495
+
1496
+ return results
1497
+
1498
+ # Copied from transformers.models.deformable_detr.image_processing_deformable_detr.DeformableDetrImageProcessor.post_process_object_detection with DeformableDetr->ConditionalDetr
1499
+ def post_process_object_detection(
1500
+ self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100
1501
+ ):
1502
+ """
1503
+ Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
1504
+ top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1505
+
1506
+ Args:
1507
+ outputs ([`DetrObjectDetectionOutput`]):
1508
+ Raw outputs of the model.
1509
+ threshold (`float`, *optional*):
1510
+ Score threshold to keep object detection predictions.
1511
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1512
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1513
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
1514
+ top_k (`int`, *optional*, defaults to 100):
1515
+ Keep only top k bounding boxes before filtering by thresholding.
1516
+
1517
+ Returns:
1518
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1519
+ in the batch as predicted by the model.
1520
+ """
1521
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1522
+
1523
+ if target_sizes is not None:
1524
+ if len(out_logits) != len(target_sizes):
1525
+ raise ValueError(
1526
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1527
+ )
1528
+
1529
+ prob = out_logits.sigmoid()
1530
+ prob = prob.view(out_logits.shape[0], -1)
1531
+ k_value = min(top_k, prob.size(1))
1532
+ topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
1533
+ scores = topk_values
1534
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
1535
+ labels = topk_indexes % out_logits.shape[2]
1536
+ boxes = center_to_corners_format(out_bbox)
1537
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
1538
+
1539
+ # and from relative [0, 1] to absolute [0, height] coordinates
1540
+ if target_sizes is not None:
1541
+ if isinstance(target_sizes, List):
1542
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1543
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1544
+ else:
1545
+ img_h, img_w = target_sizes.unbind(1)
1546
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1547
+ boxes = boxes * scale_fct[:, None, :]
1548
+
1549
+ results = []
1550
+ for s, l, b in zip(scores, labels, boxes):
1551
+ score = s[s > threshold]
1552
+ label = l[s > threshold]
1553
+ box = b[s > threshold]
1554
+ results.append({"scores": score, "labels": label, "boxes": box})
1555
+
1556
+ return results
1557
+
1558
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr
1559
+ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None):
1560
+ """
1561
+ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch.
1562
+
1563
+ Args:
1564
+ outputs ([`ConditionalDetrForSegmentation`]):
1565
+ Raw outputs of the model.
1566
+ target_sizes (`List[Tuple[int, int]]`, *optional*):
1567
+ A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the
1568
+ batch. If unset, predictions will not be resized.
1569
+ Returns:
1570
+ `List[torch.Tensor]`:
1571
+ A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
1572
+ corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
1573
+ `torch.Tensor` correspond to a semantic class id.
1574
+ """
1575
+ class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
1576
+ masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
1577
+
1578
+ # Remove the null class `[..., :-1]`
1579
+ masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
1580
+ masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1581
+
1582
+ # Semantic segmentation logits of shape (batch_size, num_classes, height, width)
1583
+ segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
1584
+ batch_size = class_queries_logits.shape[0]
1585
+
1586
+ # Resize logits and compute semantic segmentation maps
1587
+ if target_sizes is not None:
1588
+ if batch_size != len(target_sizes):
1589
+ raise ValueError(
1590
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1591
+ )
1592
+
1593
+ semantic_segmentation = []
1594
+ for idx in range(batch_size):
1595
+ resized_logits = nn.functional.interpolate(
1596
+ segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
1597
+ )
1598
+ semantic_map = resized_logits[0].argmax(dim=0)
1599
+ semantic_segmentation.append(semantic_map)
1600
+ else:
1601
+ semantic_segmentation = segmentation.argmax(dim=1)
1602
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
1603
+
1604
+ return semantic_segmentation
1605
+
1606
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation with Detr->ConditionalDetr
1607
+ def post_process_instance_segmentation(
1608
+ self,
1609
+ outputs,
1610
+ threshold: float = 0.5,
1611
+ mask_threshold: float = 0.5,
1612
+ overlap_mask_area_threshold: float = 0.8,
1613
+ target_sizes: Optional[List[Tuple[int, int]]] = None,
1614
+ return_coco_annotation: Optional[bool] = False,
1615
+ ) -> List[Dict]:
1616
+ """
1617
+ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch.
1618
+
1619
+ Args:
1620
+ outputs ([`ConditionalDetrForSegmentation`]):
1621
+ Raw outputs of the model.
1622
+ threshold (`float`, *optional*, defaults to 0.5):
1623
+ The probability score threshold to keep predicted instance masks.
1624
+ mask_threshold (`float`, *optional*, defaults to 0.5):
1625
+ Threshold to use when turning the predicted masks into binary values.
1626
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
1627
+ The overlap mask area threshold to merge or discard small disconnected parts within each binary
1628
+ instance mask.
1629
+ target_sizes (`List[Tuple]`, *optional*):
1630
+ List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested
1631
+ final size (height, width) of each prediction. If unset, predictions will not be resized.
1632
+ return_coco_annotation (`bool`, *optional*):
1633
+ Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE)
1634
+ format.
1635
+ Returns:
1636
+ `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
1637
+ - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or
1638
+ `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
1639
+ `True`. Set to `None` if no mask is found above `threshold`.
1640
+ - **segments_info** -- A dictionary that contains additional information on each segment.
1641
+ - **id** -- An integer representing the `segment_id`.
1642
+ - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
1643
+ - **score** -- Prediction score of segment with `segment_id`.
1644
+ """
1645
+ class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
1646
+ masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
1647
+
1648
+ batch_size = class_queries_logits.shape[0]
1649
+ num_labels = class_queries_logits.shape[-1] - 1
1650
+
1651
+ mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1652
+
1653
+ # Predicted label and score of each query (batch_size, num_queries)
1654
+ pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
1655
+
1656
+ # Loop over items in the batch
1657
+ results: List[Dict[str, TensorType]] = []
1658
+
1659
+ for i in range(batch_size):
1660
+ mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
1661
+ mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
1662
+ )
1663
+
1664
+ # No mask found
1665
+ if mask_probs_item.shape[0] <= 0:
1666
+ height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
1667
+ segmentation = torch.zeros((height, width)) - 1
1668
+ results.append({"segmentation": segmentation, "segments_info": []})
1669
+ continue
1670
+
1671
+ # Get segmentation map and segment information of batch item
1672
+ target_size = target_sizes[i] if target_sizes is not None else None
1673
+ segmentation, segments = compute_segments(
1674
+ mask_probs=mask_probs_item,
1675
+ pred_scores=pred_scores_item,
1676
+ pred_labels=pred_labels_item,
1677
+ mask_threshold=mask_threshold,
1678
+ overlap_mask_area_threshold=overlap_mask_area_threshold,
1679
+ label_ids_to_fuse=[],
1680
+ target_size=target_size,
1681
+ )
1682
+
1683
+ # Return segmentation map in run-length encoding (RLE) format
1684
+ if return_coco_annotation:
1685
+ segmentation = convert_segmentation_to_rle(segmentation)
1686
+
1687
+ results.append({"segmentation": segmentation, "segments_info": segments})
1688
+ return results
1689
+
1690
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation with Detr->ConditionalDetr
1691
+ def post_process_panoptic_segmentation(
1692
+ self,
1693
+ outputs,
1694
+ threshold: float = 0.5,
1695
+ mask_threshold: float = 0.5,
1696
+ overlap_mask_area_threshold: float = 0.8,
1697
+ label_ids_to_fuse: Optional[Set[int]] = None,
1698
+ target_sizes: Optional[List[Tuple[int, int]]] = None,
1699
+ ) -> List[Dict]:
1700
+ """
1701
+ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports
1702
+ PyTorch.
1703
+
1704
+ Args:
1705
+ outputs ([`ConditionalDetrForSegmentation`]):
1706
+ The outputs from [`ConditionalDetrForSegmentation`].
1707
+ threshold (`float`, *optional*, defaults to 0.5):
1708
+ The probability score threshold to keep predicted instance masks.
1709
+ mask_threshold (`float`, *optional*, defaults to 0.5):
1710
+ Threshold to use when turning the predicted masks into binary values.
1711
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
1712
+ The overlap mask area threshold to merge or discard small disconnected parts within each binary
1713
+ instance mask.
1714
+ label_ids_to_fuse (`Set[int]`, *optional*):
1715
+ The labels in this set will have all their instances fused together. For instance, we could say
1716
+ there can only be one sky in an image, but several persons, so the label ID for sky would be in that
1717
+ set, but not the one for person.
1718
+ target_sizes (`List[Tuple]`, *optional*):
1719
+ List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested
1720
+ final size (height, width) of each prediction in batch. If unset, predictions will not be resized.
1721
+ Returns:
1722
+ `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
1723
+ - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or
1724
+ `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized to
1725
+ the corresponding `target_sizes` entry.
1726
+ - **segments_info** -- A dictionary that contains additional information on each segment.
1727
+ - **id** -- an integer representing the `segment_id`.
1728
+ - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
1729
+ - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
1730
+ Multiple instances of the same class / label were fused and assigned a single `segment_id`.
1731
+ - **score** -- Prediction score of segment with `segment_id`.
1732
+ """
1733
+
1734
+ if label_ids_to_fuse is None:
1735
+ logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.")
1736
+ label_ids_to_fuse = set()
1737
+
1738
+ class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
1739
+ masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
1740
+
1741
+ batch_size = class_queries_logits.shape[0]
1742
+ num_labels = class_queries_logits.shape[-1] - 1
1743
+
1744
+ mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
1745
+
1746
+ # Predicted label and score of each query (batch_size, num_queries)
1747
+ pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
1748
+
1749
+ # Loop over items in the batch
1750
+ results: List[Dict[str, TensorType]] = []
1751
+
1752
+ for i in range(batch_size):
1753
+ mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
1754
+ mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
1755
+ )
1756
+
1757
+ # No mask found
1758
+ if mask_probs_item.shape[0] <= 0:
1759
+ height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
1760
+ segmentation = torch.zeros((height, width)) - 1
1761
+ results.append({"segmentation": segmentation, "segments_info": []})
1762
+ continue
1763
+
1764
+ # Get segmentation map and segment information of batch item
1765
+ target_size = target_sizes[i] if target_sizes is not None else None
1766
+ segmentation, segments = compute_segments(
1767
+ mask_probs=mask_probs_item,
1768
+ pred_scores=pred_scores_item,
1769
+ pred_labels=pred_labels_item,
1770
+ mask_threshold=mask_threshold,
1771
+ overlap_mask_area_threshold=overlap_mask_area_threshold,
1772
+ label_ids_to_fuse=label_ids_to_fuse,
1773
+ target_size=target_size,
1774
+ )
1775
+
1776
+ results.append({"segmentation": segmentation, "segments_info": segments})
1777
+ return results
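
The object-detection post-processing defined in this file is normally driven through the image processor instance rather than called in isolation. A minimal usage sketch, assuming the public `microsoft/conditional-detr-resnet-50` checkpoint and a local `street.jpg` image (both illustrative, not part of this diff):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection

# Illustrative checkpoint name and image path
processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

image = Image.open("street.jpg")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# target_sizes is (height, width) per image, so boxes come back in absolute pixel coordinates
target_sizes = torch.tensor([image.size[::-1]])
detections = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
for score, label, box in zip(detections["scores"], detections["labels"], detections["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(c, 1) for c in box.tolist()])
```

The same pattern applies to the segmentation variants above: pass the raw `ConditionalDetrForSegmentation` outputs and, optionally, per-image `(height, width)` target sizes.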
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/modeling_conditional_detr.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__init__.py ADDED
@@ -0,0 +1,77 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_fastspeech2_conformer": [
25
+ "FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP",
26
+ "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
27
+ "FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP",
28
+ "FastSpeech2ConformerConfig",
29
+ "FastSpeech2ConformerHifiGanConfig",
30
+ "FastSpeech2ConformerWithHifiGanConfig",
31
+ ],
32
+ "tokenization_fastspeech2_conformer": ["FastSpeech2ConformerTokenizer"],
33
+ }
34
+
35
+ try:
36
+ if not is_torch_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["modeling_fastspeech2_conformer"] = [
42
+ "FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
43
+ "FastSpeech2ConformerWithHifiGan",
44
+ "FastSpeech2ConformerHifiGan",
45
+ "FastSpeech2ConformerModel",
46
+ "FastSpeech2ConformerPreTrainedModel",
47
+ ]
48
+
49
+ if TYPE_CHECKING:
50
+ from .configuration_fastspeech2_conformer import (
51
+ FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP,
52
+ FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
53
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP,
54
+ FastSpeech2ConformerConfig,
55
+ FastSpeech2ConformerHifiGanConfig,
56
+ FastSpeech2ConformerWithHifiGanConfig,
57
+ )
58
+ from .tokenization_fastspeech2_conformer import FastSpeech2ConformerTokenizer
59
+
60
+ try:
61
+ if not is_torch_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .modeling_fastspeech2_conformer import (
67
+ FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
68
+ FastSpeech2ConformerHifiGan,
69
+ FastSpeech2ConformerModel,
70
+ FastSpeech2ConformerPreTrainedModel,
71
+ FastSpeech2ConformerWithHifiGan,
72
+ )
73
+
74
+ else:
75
+ import sys
76
+
77
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
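
As a brief, hedged illustration of what this lazy-init module buys: importing the package is cheap, and the torch-only modeling symbols are only resolved on first attribute access (assuming `torch` is installed for those).

```python
# Nothing heavy is loaded at import time; _LazyModule resolves submodules lazily.
from transformers.models.fastspeech2_conformer import (
    FastSpeech2ConformerConfig,       # config has no torch dependency
    FastSpeech2ConformerTokenizer,    # tokenizer is likewise torch-free to import
)

config = FastSpeech2ConformerConfig()
print(config.model_type)  # "fastspeech2_conformer"
```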
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_hifigan.cpython-310.pyc ADDED
Binary file (3.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_model_with_hifigan.cpython-310.pyc ADDED
Binary file (2.32 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc ADDED
Binary file (57.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/tokenization_fastspeech2_conformer.cpython-310.pyc ADDED
Binary file (6.46 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py ADDED
@@ -0,0 +1,482 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ FastSpeech2Conformer model configuration"""
16
+
17
+ from typing import Dict
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import ( # noqa: F401, E402
27
+ FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, # noqa: F401, E402
28
+ FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, # noqa: F401, E402
29
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, # noqa: F401, E402
30
+ )
31
+
32
+
33
+ class FastSpeech2ConformerConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to
36
+ instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture.
37
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
38
+ FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer)
39
+ architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+ Args:
45
+ hidden_size (`int`, *optional*, defaults to 384):
46
+ The dimensionality of the hidden layers.
47
+ vocab_size (`int`, *optional*, defaults to 78):
48
+ The size of the vocabulary.
49
+ num_mel_bins (`int`, *optional*, defaults to 80):
50
+ The number of mel filters used in the filter bank.
51
+ encoder_num_attention_heads (`int`, *optional*, defaults to 2):
52
+ The number of attention heads in the encoder.
53
+ encoder_layers (`int`, *optional*, defaults to 4):
54
+ The number of layers in the encoder.
55
+ encoder_linear_units (`int`, *optional*, defaults to 1536):
56
+ The number of units in the linear layer of the encoder.
57
+ decoder_layers (`int`, *optional*, defaults to 4):
58
+ The number of layers in the decoder.
59
+ decoder_num_attention_heads (`int`, *optional*, defaults to 2):
60
+ The number of attention heads in the decoder.
61
+ decoder_linear_units (`int`, *optional*, defaults to 1536):
62
+ The number of units in the linear layer of the decoder.
63
+ speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
64
+ The number of layers in the post-net of the speech decoder.
65
+ speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
66
+ The number of units in the post-net layers of the speech decoder.
67
+ speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
68
+ The kernel size in the post-net of the speech decoder.
69
+ positionwise_conv_kernel_size (`int`, *optional*, defaults to 3):
70
+ The size of the convolution kernel used in the position-wise layer.
71
+ encoder_normalize_before (`bool`, *optional*, defaults to `False`):
72
+ Specifies whether to normalize before encoder layers.
73
+ decoder_normalize_before (`bool`, *optional*, defaults to `False`):
74
+ Specifies whether to normalize before decoder layers.
75
+ encoder_concat_after (`bool`, *optional*, defaults to `False`):
76
+ Specifies whether to concatenate after encoder layers.
77
+ decoder_concat_after (`bool`, *optional*, defaults to `False`):
78
+ Specifies whether to concatenate after decoder layers.
79
+ reduction_factor (`int`, *optional*, defaults to 1):
80
+ The factor by which the speech frame rate is reduced.
81
+ speaking_speed (`float`, *optional*, defaults to 1.0):
82
+ The speed of the speech produced.
83
+ use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`):
84
+ Specifies whether to use macaron style in the conformer.
85
+ use_cnn_in_conformer (`bool`, *optional*, defaults to `True`):
86
+ Specifies whether to use convolutional neural networks in the conformer.
87
+ encoder_kernel_size (`int`, *optional*, defaults to 7):
88
+ The kernel size used in the encoder.
89
+ decoder_kernel_size (`int`, *optional*, defaults to 31):
90
+ The kernel size used in the decoder.
91
+ duration_predictor_layers (`int`, *optional*, defaults to 2):
92
+ The number of layers in the duration predictor.
93
+ duration_predictor_channels (`int`, *optional*, defaults to 256):
94
+ The number of channels in the duration predictor.
95
+ duration_predictor_kernel_size (`int`, *optional*, defaults to 3):
96
+ The kernel size used in the duration predictor.
97
+ energy_predictor_layers (`int`, *optional*, defaults to 2):
98
+ The number of layers in the energy predictor.
99
+ energy_predictor_channels (`int`, *optional*, defaults to 256):
100
+ The number of channels in the energy predictor.
101
+ energy_predictor_kernel_size (`int`, *optional*, defaults to 3):
102
+ The kernel size used in the energy predictor.
103
+ energy_predictor_dropout (`float`, *optional*, defaults to 0.5):
104
+ The dropout rate in the energy predictor.
105
+ energy_embed_kernel_size (`int`, *optional*, defaults to 1):
106
+ The kernel size used in the energy embed layer.
107
+ energy_embed_dropout (`float`, *optional*, defaults to 0.0):
108
+ The dropout rate in the energy embed layer.
109
+ stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`):
110
+ Specifies whether to stop gradients from the energy predictor.
111
+ pitch_predictor_layers (`int`, *optional*, defaults to 5):
112
+ The number of layers in the pitch predictor.
113
+ pitch_predictor_channels (`int`, *optional*, defaults to 256):
114
+ The number of channels in the pitch predictor.
115
+ pitch_predictor_kernel_size (`int`, *optional*, defaults to 5):
116
+ The kernel size used in the pitch predictor.
117
+ pitch_predictor_dropout (`float`, *optional*, defaults to 0.5):
118
+ The dropout rate in the pitch predictor.
119
+ pitch_embed_kernel_size (`int`, *optional*, defaults to 1):
120
+ The kernel size used in the pitch embed layer.
121
+ pitch_embed_dropout (`float`, *optional*, defaults to 0.0):
122
+ The dropout rate in the pitch embed layer.
123
+ stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`):
124
+ Specifies whether to stop gradients from the pitch predictor.
125
+ encoder_dropout_rate (`float`, *optional*, defaults to 0.2):
126
+ The dropout rate in the encoder.
127
+ encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
128
+ The positional dropout rate in the encoder.
129
+ encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
130
+ The attention dropout rate in the encoder.
131
+ decoder_dropout_rate (`float`, *optional*, defaults to 0.2):
132
+ The dropout rate in the decoder.
133
+ decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
134
+ The positional dropout rate in the decoder.
135
+ decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
136
+ The attention dropout rate in the decoder.
137
+ duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2):
138
+ The dropout rate in the duration predictor.
139
+ speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
140
+ The dropout rate in the speech decoder postnet.
141
+ max_source_positions (`int`, *optional*, defaults to 5000):
142
+ if `"relative"` position embeddings are used, defines the maximum source input positions.
143
+ use_masking (`bool`, *optional*, defaults to `True`):
144
+ Specifies whether to use masking in the model.
145
+ use_weighted_masking (`bool`, *optional*, defaults to `False`):
146
+ Specifies whether to use weighted masking in the model.
147
+ num_speakers (`int`, *optional*):
148
+ Number of speakers. If set to > 1, assume that the speaker ids will be provided as the input and use
149
+ speaker id embedding layer.
150
+ num_languages (`int`, *optional*):
151
+ Number of languages. If set to > 1, assume that the language ids will be provided as the input and use the
152
+ languge id embedding layer.
153
+ speaker_embed_dim (`int`, *optional*):
154
+ Speaker embedding dimension. If set to > 0, assume that speaker_embedding will be provided as the input.
155
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
156
+ Specifies whether the model is an encoder-decoder.
157
+
158
+ Example:
159
+
160
+ ```python
161
+ >>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig
162
+
163
+ >>> # Initializing a FastSpeech2Conformer style configuration
164
+ >>> configuration = FastSpeech2ConformerConfig()
165
+
166
+ >>> # Initializing a model from the FastSpeech2Conformer style configuration
167
+ >>> model = FastSpeech2ConformerModel(configuration)
168
+
169
+ >>> # Accessing the model configuration
170
+ >>> configuration = model.config
171
+ ```"""
172
+
173
+ model_type = "fastspeech2_conformer"
174
+ attribute_map = {"num_hidden_layers": "encoder_layers", "num_attention_heads": "encoder_num_attention_heads"}
175
+
176
+ def __init__(
177
+ self,
178
+ hidden_size=384,
179
+ vocab_size=78,
180
+ num_mel_bins=80,
181
+ encoder_num_attention_heads=2,
182
+ encoder_layers=4,
183
+ encoder_linear_units=1536,
184
+ decoder_layers=4,
185
+ decoder_num_attention_heads=2,
186
+ decoder_linear_units=1536,
187
+ speech_decoder_postnet_layers=5,
188
+ speech_decoder_postnet_units=256,
189
+ speech_decoder_postnet_kernel=5,
190
+ positionwise_conv_kernel_size=3,
191
+ encoder_normalize_before=False,
192
+ decoder_normalize_before=False,
193
+ encoder_concat_after=False,
194
+ decoder_concat_after=False,
195
+ reduction_factor=1,
196
+ speaking_speed=1.0,
197
+ use_macaron_style_in_conformer=True,
198
+ use_cnn_in_conformer=True,
199
+ encoder_kernel_size=7,
200
+ decoder_kernel_size=31,
201
+ duration_predictor_layers=2,
202
+ duration_predictor_channels=256,
203
+ duration_predictor_kernel_size=3,
204
+ energy_predictor_layers=2,
205
+ energy_predictor_channels=256,
206
+ energy_predictor_kernel_size=3,
207
+ energy_predictor_dropout=0.5,
208
+ energy_embed_kernel_size=1,
209
+ energy_embed_dropout=0.0,
210
+ stop_gradient_from_energy_predictor=False,
211
+ pitch_predictor_layers=5,
212
+ pitch_predictor_channels=256,
213
+ pitch_predictor_kernel_size=5,
214
+ pitch_predictor_dropout=0.5,
215
+ pitch_embed_kernel_size=1,
216
+ pitch_embed_dropout=0.0,
217
+ stop_gradient_from_pitch_predictor=True,
218
+ encoder_dropout_rate=0.2,
219
+ encoder_positional_dropout_rate=0.2,
220
+ encoder_attention_dropout_rate=0.2,
221
+ decoder_dropout_rate=0.2,
222
+ decoder_positional_dropout_rate=0.2,
223
+ decoder_attention_dropout_rate=0.2,
224
+ duration_predictor_dropout_rate=0.2,
225
+ speech_decoder_postnet_dropout=0.5,
226
+ max_source_positions=5000,
227
+ use_masking=True,
228
+ use_weighted_masking=False,
229
+ num_speakers=None,
230
+ num_languages=None,
231
+ speaker_embed_dim=None,
232
+ is_encoder_decoder=True,
233
+ **kwargs,
234
+ ):
235
+ if positionwise_conv_kernel_size % 2 == 0:
236
+ raise ValueError(
237
+ f"positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead."
238
+ )
239
+ if encoder_kernel_size % 2 == 0:
240
+ raise ValueError(f"encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.")
241
+ if decoder_kernel_size % 2 == 0:
242
+ raise ValueError(f"decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.")
243
+ if duration_predictor_kernel_size % 2 == 0:
244
+ raise ValueError(
245
+ f"duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead."
246
+ )
247
+ if energy_predictor_kernel_size % 2 == 0:
248
+ raise ValueError(
249
+ f"energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead."
250
+ )
251
+ if energy_embed_kernel_size % 2 == 0:
252
+ raise ValueError(f"energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.")
253
+ if pitch_predictor_kernel_size % 2 == 0:
254
+ raise ValueError(
255
+ f"pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead."
256
+ )
257
+ if pitch_embed_kernel_size % 2 == 0:
258
+ raise ValueError(f"pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.")
259
+ if hidden_size % encoder_num_attention_heads != 0:
260
+ raise ValueError("The hidden_size must be evenly divisible by encoder_num_attention_heads.")
261
+ if hidden_size % decoder_num_attention_heads != 0:
262
+ raise ValueError("The hidden_size must be evenly divisible by decoder_num_attention_heads.")
263
+ if use_masking and use_weighted_masking:
264
+ raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.")
265
+
266
+ self.hidden_size = hidden_size
267
+ self.vocab_size = vocab_size
268
+ self.num_mel_bins = num_mel_bins
269
+ self.encoder_config = {
270
+ "num_attention_heads": encoder_num_attention_heads,
271
+ "layers": encoder_layers,
272
+ "kernel_size": encoder_kernel_size,
273
+ "attention_dropout_rate": encoder_attention_dropout_rate,
274
+ "dropout_rate": encoder_dropout_rate,
275
+ "positional_dropout_rate": encoder_positional_dropout_rate,
276
+ "linear_units": encoder_linear_units,
277
+ "normalize_before": encoder_normalize_before,
278
+ "concat_after": encoder_concat_after,
279
+ }
280
+ self.decoder_config = {
281
+ "num_attention_heads": decoder_num_attention_heads,
282
+ "layers": decoder_layers,
283
+ "kernel_size": decoder_kernel_size,
284
+ "attention_dropout_rate": decoder_attention_dropout_rate,
285
+ "dropout_rate": decoder_dropout_rate,
286
+ "positional_dropout_rate": decoder_positional_dropout_rate,
287
+ "linear_units": decoder_linear_units,
288
+ "normalize_before": decoder_normalize_before,
289
+ "concat_after": decoder_concat_after,
290
+ }
291
+ self.encoder_num_attention_heads = encoder_num_attention_heads
292
+ self.encoder_layers = encoder_layers
293
+ self.duration_predictor_channels = duration_predictor_channels
294
+ self.duration_predictor_kernel_size = duration_predictor_kernel_size
295
+ self.duration_predictor_layers = duration_predictor_layers
296
+ self.energy_embed_dropout = energy_embed_dropout
297
+ self.energy_embed_kernel_size = energy_embed_kernel_size
298
+ self.energy_predictor_channels = energy_predictor_channels
299
+ self.energy_predictor_dropout = energy_predictor_dropout
300
+ self.energy_predictor_kernel_size = energy_predictor_kernel_size
301
+ self.energy_predictor_layers = energy_predictor_layers
302
+ self.pitch_embed_dropout = pitch_embed_dropout
303
+ self.pitch_embed_kernel_size = pitch_embed_kernel_size
304
+ self.pitch_predictor_channels = pitch_predictor_channels
305
+ self.pitch_predictor_dropout = pitch_predictor_dropout
306
+ self.pitch_predictor_kernel_size = pitch_predictor_kernel_size
307
+ self.pitch_predictor_layers = pitch_predictor_layers
308
+ self.positionwise_conv_kernel_size = positionwise_conv_kernel_size
309
+ self.speech_decoder_postnet_units = speech_decoder_postnet_units
310
+ self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout
311
+ self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel
312
+ self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
313
+ self.reduction_factor = reduction_factor
314
+ self.speaking_speed = speaking_speed
315
+ self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
316
+ self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
317
+ self.max_source_positions = max_source_positions
318
+ self.use_cnn_in_conformer = use_cnn_in_conformer
319
+ self.use_macaron_style_in_conformer = use_macaron_style_in_conformer
320
+ self.use_masking = use_masking
321
+ self.use_weighted_masking = use_weighted_masking
322
+ self.num_speakers = num_speakers
323
+ self.num_languages = num_languages
324
+ self.speaker_embed_dim = speaker_embed_dim
325
+ self.duration_predictor_dropout_rate = duration_predictor_dropout_rate
326
+ self.is_encoder_decoder = is_encoder_decoder
327
+
328
+ super().__init__(
329
+ is_encoder_decoder=is_encoder_decoder,
330
+ **kwargs,
331
+ )
332
+
333
+
334
+ class FastSpeech2ConformerHifiGanConfig(PretrainedConfig):
335
+ r"""
336
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerHifiGanModel`]. It is used to
337
+ instantiate a FastSpeech2Conformer HiFi-GAN vocoder model according to the specified arguments, defining the model
338
+ architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
339
+ FastSpeech2Conformer
340
+ [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architecture.
341
+
342
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
343
+ documentation from [`PretrainedConfig`] for more information.
344
+
345
+ Args:
346
+ model_in_dim (`int`, *optional*, defaults to 80):
347
+ The number of frequency bins in the input log-mel spectrogram.
348
+ upsample_initial_channel (`int`, *optional*, defaults to 512):
349
+ The number of input channels into the upsampling network.
350
+ upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 2, 2]`):
351
+ A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The
352
+ length of *upsample_rates* defines the number of convolutional layers and has to match the length of
353
+ *upsample_kernel_sizes*.
354
+ upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[16, 16, 4, 4]`):
355
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The
356
+ length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of
357
+ *upsample_rates*.
358
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):
359
+ A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field
360
+ fusion (MRF) module.
361
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
362
+ A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
363
+ multi-receptive field fusion (MRF) module.
364
+ initializer_range (`float`, *optional*, defaults to 0.01):
365
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
366
+ leaky_relu_slope (`float`, *optional*, defaults to 0.1):
367
+ The angle of the negative slope used by the leaky ReLU activation.
368
+ normalize_before (`bool`, *optional*, defaults to `True`):
369
+ Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance.
370
+
371
+ Example:
372
+
373
+ ```python
374
+ >>> from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig
375
+
376
+ >>> # Initializing a FastSpeech2ConformerHifiGan configuration
377
+ >>> configuration = FastSpeech2ConformerHifiGanConfig()
378
+
379
+ >>> # Initializing a model (with random weights) from the configuration
380
+ >>> model = FastSpeech2ConformerHifiGan(configuration)
381
+
382
+ >>> # Accessing the model configuration
383
+ >>> configuration = model.config
384
+ ```"""
385
+
386
+ model_type = "hifigan"
387
+
388
+ def __init__(
389
+ self,
390
+ model_in_dim=80,
391
+ upsample_initial_channel=512,
392
+ upsample_rates=[8, 8, 2, 2],
393
+ upsample_kernel_sizes=[16, 16, 4, 4],
394
+ resblock_kernel_sizes=[3, 7, 11],
395
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
396
+ initializer_range=0.01,
397
+ leaky_relu_slope=0.1,
398
+ normalize_before=True,
399
+ **kwargs,
400
+ ):
401
+ self.model_in_dim = model_in_dim
402
+ self.upsample_initial_channel = upsample_initial_channel
403
+ self.upsample_rates = upsample_rates
404
+ self.upsample_kernel_sizes = upsample_kernel_sizes
405
+ self.resblock_kernel_sizes = resblock_kernel_sizes
406
+ self.resblock_dilation_sizes = resblock_dilation_sizes
407
+ self.initializer_range = initializer_range
408
+ self.leaky_relu_slope = leaky_relu_slope
409
+ self.normalize_before = normalize_before
410
+ super().__init__(**kwargs)
411
+
412
+
413
+ class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig):
414
+ """
415
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerWithHifiGan`]. It is used to
416
+ instantiate a `FastSpeech2ConformerWithHifiGanModel` model according to the specified sub-models configurations,
417
+ defining the model architecture.
418
+
419
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
420
+ FastSpeech2ConformerModel [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) and
421
+ FastSpeech2ConformerHifiGan
422
+ [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architectures.
423
+
424
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
425
+ documentation from [`PretrainedConfig`] for more information.
426
+
427
+ Args:
428
+ model_config (`typing.Dict`, *optional*):
429
+ Configuration of the text-to-speech model.
430
+ vocoder_config (`typing.Dict`, *optional*):
431
+ Configuration of the vocoder model.
432
+ model_config ([`FastSpeech2ConformerConfig`], *optional*):
433
+ Configuration of the text-to-speech model.
434
+ vocoder_config ([`FastSpeech2ConformerHifiGanConfig`], *optional*):
435
+ Configuration of the vocoder model.
436
+
437
+ Example:
438
+
439
+ ```python
440
+ >>> from transformers import (
441
+ ... FastSpeech2ConformerConfig,
442
+ ... FastSpeech2ConformerHifiGanConfig,
443
+ ... FastSpeech2ConformerWithHifiGanConfig,
444
+ ... FastSpeech2ConformerWithHifiGan,
445
+ ... )
446
+
447
+ >>> # Initializing FastSpeech2ConformerWithHifiGan sub-modules configurations.
448
+ >>> model_config = FastSpeech2ConformerConfig()
449
+ >>> vocoder_config = FastSpeech2ConformerHifiGanConfig()
450
+
451
+ >>> # Initializing a FastSpeech2ConformerWithHifiGan module style configuration
452
+ >>> configuration = FastSpeech2ConformerWithHifiGanConfig(model_config.to_dict(), vocoder_config.to_dict())
453
+
454
+ >>> # Initializing a model (with random weights)
455
+ >>> model = FastSpeech2ConformerWithHifiGan(configuration)
456
+
457
+ >>> # Accessing the model configuration
458
+ >>> configuration = model.config
459
+ ```
460
+ """
461
+
462
+ model_type = "fastspeech2_conformer_with_hifigan"
463
+ is_composition = True
464
+
465
+ def __init__(
466
+ self,
467
+ model_config: Dict = None,
468
+ vocoder_config: Dict = None,
469
+ **kwargs,
470
+ ):
471
+ if model_config is None:
472
+ model_config = {}
473
+ logger.info("model_config is None. initializing the model with default values.")
474
+
475
+ if vocoder_config is None:
476
+ vocoder_config = {}
477
+ logger.info("vocoder_config is None. initializing the coarse model with default values.")
478
+
479
+ self.model_config = FastSpeech2ConformerConfig(**model_config)
480
+ self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config)
481
+
482
+ super().__init__(**kwargs)
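
A short sketch of how these configuration classes behave in practice: the odd-kernel-size validation in `FastSpeech2ConformerConfig.__init__` fails fast, and the combined config accepts plain dictionaries and rebuilds the sub-configs internally (mirroring the docstring example above).

```python
from transformers import (
    FastSpeech2ConformerConfig,
    FastSpeech2ConformerHifiGanConfig,
    FastSpeech2ConformerWithHifiGanConfig,
)

# Even kernel sizes are rejected with a ValueError
try:
    FastSpeech2ConformerConfig(encoder_kernel_size=8)
except ValueError as err:
    print(err)  # "encoder_kernel_size must be odd, but got 8 instead."

# The combined config takes dicts and reconstructs the sub-configs
combined = FastSpeech2ConformerWithHifiGanConfig(
    model_config=FastSpeech2ConformerConfig().to_dict(),
    vocoder_config=FastSpeech2ConformerHifiGanConfig().to_dict(),
)
print(type(combined.model_config).__name__)    # FastSpeech2ConformerConfig
print(type(combined.vocoder_config).__name__)  # FastSpeech2ConformerHifiGanConfig
```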
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,210 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert FastSpeech2Conformer checkpoint."""
16
+
17
+ import argparse
18
+ import json
19
+ import re
20
+ from pathlib import Path
21
+ from tempfile import TemporaryDirectory
22
+
23
+ import torch
24
+ import yaml
25
+
26
+ from transformers import (
27
+ FastSpeech2ConformerConfig,
28
+ FastSpeech2ConformerModel,
29
+ FastSpeech2ConformerTokenizer,
30
+ logging,
31
+ )
32
+
33
+
34
+ logging.set_verbosity_info()
35
+ logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
36
+
37
+ CONFIG_MAPPING = {
38
+ "adim": "hidden_size",
39
+ "aheads": "num_attention_heads",
40
+ "conformer_dec_kernel_size": "decoder_kernel_size",
41
+ "conformer_enc_kernel_size": "encoder_kernel_size",
42
+ "decoder_normalize_before": "decoder_normalize_before",
43
+ "dlayers": "decoder_layers",
44
+ "dunits": "decoder_linear_units",
45
+ "duration_predictor_chans": "duration_predictor_channels",
46
+ "duration_predictor_kernel_size": "duration_predictor_kernel_size",
47
+ "duration_predictor_layers": "duration_predictor_layers",
48
+ "elayers": "encoder_layers",
49
+ "encoder_normalize_before": "encoder_normalize_before",
50
+ "energy_embed_dropout": "energy_embed_dropout",
51
+ "energy_embed_kernel_size": "energy_embed_kernel_size",
52
+ "energy_predictor_chans": "energy_predictor_channels",
53
+ "energy_predictor_dropout": "energy_predictor_dropout",
54
+ "energy_predictor_kernel_size": "energy_predictor_kernel_size",
55
+ "energy_predictor_layers": "energy_predictor_layers",
56
+ "eunits": "encoder_linear_units",
57
+ "pitch_embed_dropout": "pitch_embed_dropout",
58
+ "pitch_embed_kernel_size": "pitch_embed_kernel_size",
59
+ "pitch_predictor_chans": "pitch_predictor_channels",
60
+ "pitch_predictor_dropout": "pitch_predictor_dropout",
61
+ "pitch_predictor_kernel_size": "pitch_predictor_kernel_size",
62
+ "pitch_predictor_layers": "pitch_predictor_layers",
63
+ "positionwise_conv_kernel_size": "positionwise_conv_kernel_size",
64
+ "postnet_chans": "speech_decoder_postnet_units",
65
+ "postnet_filts": "speech_decoder_postnet_kernel",
66
+ "postnet_layers": "speech_decoder_postnet_layers",
67
+ "reduction_factor": "reduction_factor",
68
+ "stop_gradient_from_energy_predictor": "stop_gradient_from_energy_predictor",
69
+ "stop_gradient_from_pitch_predictor": "stop_gradient_from_pitch_predictor",
70
+ "transformer_dec_attn_dropout_rate": "decoder_attention_dropout_rate",
71
+ "transformer_dec_dropout_rate": "decoder_dropout_rate",
72
+ "transformer_dec_positional_dropout_rate": "decoder_positional_dropout_rate",
73
+ "transformer_enc_attn_dropout_rate": "encoder_attention_dropout_rate",
74
+ "transformer_enc_dropout_rate": "encoder_dropout_rate",
75
+ "transformer_enc_positional_dropout_rate": "encoder_positional_dropout_rate",
76
+ "use_cnn_in_conformer": "use_cnn_in_conformer",
77
+ "use_macaron_style_in_conformer": "use_macaron_style_in_conformer",
78
+ "use_masking": "use_masking",
79
+ "use_weighted_masking": "use_weighted_masking",
80
+ "idim": "input_dim",
81
+ "odim": "num_mel_bins",
82
+ "spk_embed_dim": "speaker_embed_dim",
83
+ "langs": "num_languages",
84
+ "spks": "num_speakers",
85
+ }
86
+
87
+
88
+ def remap_model_yaml_config(yaml_config_path):
89
+ with Path(yaml_config_path).open("r", encoding="utf-8") as f:
90
+ args = yaml.safe_load(f)
91
+ args = argparse.Namespace(**args)
92
+
93
+ remapped_config = {}
94
+
95
+ model_params = args.tts_conf["text2mel_params"]
96
+ # espnet_config_key -> hf_config_key, any keys not included are ignored
97
+ for espnet_config_key, hf_config_key in CONFIG_MAPPING.items():
98
+ if espnet_config_key in model_params:
99
+ remapped_config[hf_config_key] = model_params[espnet_config_key]
100
+
101
+ return remapped_config, args.g2p, args.token_list
102
+
103
+
104
+ def convert_espnet_state_dict_to_hf(state_dict):
105
+ new_state_dict = {}
106
+ for key in state_dict:
107
+ if "tts.generator.text2mel." in key:
108
+ new_key = key.replace("tts.generator.text2mel.", "")
109
+ if "postnet" in key:
110
+ new_key = new_key.replace("postnet.postnet", "speech_decoder_postnet.layers")
111
+ new_key = new_key.replace(".0.weight", ".conv.weight")
112
+ new_key = new_key.replace(".1.weight", ".batch_norm.weight")
113
+ new_key = new_key.replace(".1.bias", ".batch_norm.bias")
114
+ new_key = new_key.replace(".1.running_mean", ".batch_norm.running_mean")
115
+ new_key = new_key.replace(".1.running_var", ".batch_norm.running_var")
116
+ new_key = new_key.replace(".1.num_batches_tracked", ".batch_norm.num_batches_tracked")
117
+ if "feat_out" in key:
118
+ if "weight" in key:
119
+ new_key = "speech_decoder_postnet.feat_out.weight"
120
+ if "bias" in key:
121
+ new_key = "speech_decoder_postnet.feat_out.bias"
122
+ if "encoder.embed.0.weight" in key:
123
+ new_key = new_key.replace("0.", "")
124
+ if "w_1" in key:
125
+ new_key = new_key.replace("w_1", "conv1")
126
+ if "w_2" in key:
127
+ new_key = new_key.replace("w_2", "conv2")
128
+ if "predictor.conv" in key:
129
+ new_key = new_key.replace(".conv", ".conv_layers")
130
+ pattern = r"(\d)\.(\d)"
131
+ replacement = (
132
+ r"\1.conv" if ("2.weight" not in new_key) and ("2.bias" not in new_key) else r"\1.layer_norm"
133
+ )
134
+ new_key = re.sub(pattern, replacement, new_key)
135
+ if "pitch_embed" in key or "energy_embed" in key:
136
+ new_key = new_key.replace("0", "conv")
137
+ if "encoders" in key:
138
+ new_key = new_key.replace("encoders", "conformer_layers")
139
+ new_key = new_key.replace("norm_final", "final_layer_norm")
140
+ new_key = new_key.replace("norm_mha", "self_attn_layer_norm")
141
+ new_key = new_key.replace("norm_ff_macaron", "ff_macaron_layer_norm")
142
+ new_key = new_key.replace("norm_ff", "ff_layer_norm")
143
+ new_key = new_key.replace("norm_conv", "conv_layer_norm")
144
+ if "lid_emb" in key:
145
+ new_key = new_key.replace("lid_emb", "language_id_embedding")
146
+ if "sid_emb" in key:
147
+ new_key = new_key.replace("sid_emb", "speaker_id_embedding")
148
+
149
+ new_state_dict[new_key] = state_dict[key]
150
+
151
+ return new_state_dict
152
+
153
+
154
+ @torch.no_grad()
155
+ def convert_FastSpeech2ConformerModel_checkpoint(
156
+ checkpoint_path,
157
+ yaml_config_path,
158
+ pytorch_dump_folder_path,
159
+ repo_id=None,
160
+ ):
161
+ model_params, tokenizer_name, vocab = remap_model_yaml_config(yaml_config_path)
162
+ config = FastSpeech2ConformerConfig(**model_params)
163
+
164
+ # Prepare the model
165
+ model = FastSpeech2ConformerModel(config)
166
+
167
+ espnet_checkpoint = torch.load(checkpoint_path)
168
+ hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint)
169
+
170
+ model.load_state_dict(hf_compatible_state_dict)
171
+
172
+ model.save_pretrained(pytorch_dump_folder_path)
173
+
174
+ # Prepare the tokenizer
175
+ with TemporaryDirectory() as tempdir:
176
+ vocab = {token: id for id, token in enumerate(vocab)}
177
+ vocab_file = Path(tempdir) / "vocab.json"
178
+ with open(vocab_file, "w") as f:
179
+ json.dump(vocab, f)
180
+ should_strip_spaces = "no_space" in tokenizer_name
181
+ tokenizer = FastSpeech2ConformerTokenizer(str(vocab_file), should_strip_spaces=should_strip_spaces)
182
+
183
+ tokenizer.save_pretrained(pytorch_dump_folder_path)
184
+
185
+ if repo_id:
186
+ print("Pushing to the hub...")
187
+ model.push_to_hub(repo_id)
188
+ tokenizer.push_to_hub(repo_id)
189
+
190
+
191
+ if __name__ == "__main__":
192
+ parser = argparse.ArgumentParser()
193
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
194
+ parser.add_argument(
195
+ "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert"
196
+ )
197
+ parser.add_argument(
198
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
199
+ )
200
+ parser.add_argument(
201
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
202
+ )
203
+
204
+ args = parser.parse_args()
205
+ convert_FastSpeech2ConformerModel_checkpoint(
206
+ args.checkpoint_path,
207
+ args.yaml_config_path,
208
+ args.pytorch_dump_folder_path,
209
+ args.push_to_hub,
210
+ )
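
The converter above is a thin CLI around `convert_FastSpeech2ConformerModel_checkpoint`; the same function can be called directly. A hedged sketch with hypothetical ESPnet artifact paths (the keyword names come from the function signature above):

```python
from transformers.models.fastspeech2_conformer.convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch import (
    convert_FastSpeech2ConformerModel_checkpoint,
)

# Hypothetical ESPnet checkpoint and config paths
convert_FastSpeech2ConformerModel_checkpoint(
    checkpoint_path="exp/tts_train_raw_phn_tacotron_g2p_en_no_space/train.loss.ave_5best.pth",
    yaml_config_path="exp/tts_train_raw_phn_tacotron_g2p_en_no_space/config.yaml",
    pytorch_dump_folder_path="./fastspeech2_conformer_hf",
    repo_id=None,  # set to "user/model-name" to push the converted model and tokenizer to the Hub
)
```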
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_hifigan.py ADDED
@@ -0,0 +1,134 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert FastSpeech2Conformer HiFi-GAN checkpoint."""
16
+
17
+ import argparse
18
+ from pathlib import Path
19
+
20
+ import torch
21
+ import yaml
22
+
23
+ from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig, logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+ logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
28
+
29
+
30
+ def load_weights(checkpoint, hf_model, config):
31
+ vocoder_key_prefix = "tts.generator.vocoder."
32
+ checkpoint = {k.replace(vocoder_key_prefix, ""): v for k, v in checkpoint.items() if vocoder_key_prefix in k}
33
+
34
+ hf_model.apply_weight_norm()
35
+
36
+ hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
37
+ hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
38
+ hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
39
+
40
+ for i in range(len(config.upsample_rates)):
41
+ hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
42
+ hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
43
+ hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
44
+
45
+ for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
46
+ for j in range(len(config.resblock_dilation_sizes)):
47
+ hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
48
+ hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
49
+ hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
50
+
51
+ hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
52
+ hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
53
+ hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
54
+
55
+ hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
56
+ hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
57
+ hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
58
+
59
+ hf_model.remove_weight_norm()
60
+
61
+
62
+ def remap_hifigan_yaml_config(yaml_config_path):
63
+ with Path(yaml_config_path).open("r", encoding="utf-8") as f:
64
+ args = yaml.safe_load(f)
65
+ args = argparse.Namespace(**args)
66
+
67
+ vocoder_type = args.tts_conf["vocoder_type"]
68
+ if vocoder_type != "hifigan_generator":
69
+ raise TypeError(f"Vocoder config must be for `hifigan_generator`, but got {vocoder_type}")
70
+
71
+ remapped_dict = {}
72
+ vocoder_params = args.tts_conf["vocoder_params"]
73
+
74
+ # espnet_config_key -> hf_config_key
75
+ key_mappings = {
76
+ "channels": "upsample_initial_channel",
77
+ "in_channels": "model_in_dim",
78
+ "resblock_dilations": "resblock_dilation_sizes",
79
+ "resblock_kernel_sizes": "resblock_kernel_sizes",
80
+ "upsample_kernel_sizes": "upsample_kernel_sizes",
81
+ "upsample_scales": "upsample_rates",
82
+ }
83
+ for espnet_config_key, hf_config_key in key_mappings.items():
84
+ remapped_dict[hf_config_key] = vocoder_params[espnet_config_key]
85
+ remapped_dict["sampling_rate"] = args.tts_conf["sampling_rate"]
86
+ remapped_dict["normalize_before"] = False
87
+ remapped_dict["leaky_relu_slope"] = vocoder_params["nonlinear_activation_params"]["negative_slope"]
88
+
89
+ return remapped_dict
90
+
91
+
92
+ @torch.no_grad()
93
+ def convert_hifigan_checkpoint(
94
+ checkpoint_path,
95
+ pytorch_dump_folder_path,
96
+ yaml_config_path=None,
97
+ repo_id=None,
98
+ ):
99
+ if yaml_config_path is not None:
100
+ config_kwargs = remap_hifigan_yaml_config(yaml_config_path)
101
+ config = FastSpeech2ConformerHifiGanConfig(**config_kwargs)
102
+ else:
103
+ config = FastSpeech2ConformerHifiGanConfig()
104
+
105
+ model = FastSpeech2ConformerHifiGan(config)
106
+
107
+ orig_checkpoint = torch.load(checkpoint_path)
108
+ load_weights(orig_checkpoint, model, config)
109
+
110
+ model.save_pretrained(pytorch_dump_folder_path)
111
+
112
+ if repo_id:
113
+ print("Pushing to the hub...")
114
+ model.push_to_hub(repo_id)
115
+
116
+
117
+ if __name__ == "__main__":
118
+ parser = argparse.ArgumentParser()
119
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
120
+ parser.add_argument("--yaml_config_path", default=None, type=str, help="Path to config.yaml of model to convert")
121
+ parser.add_argument(
122
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
123
+ )
124
+ parser.add_argument(
125
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
126
+ )
127
+
128
+ args = parser.parse_args()
129
+ convert_hifigan_checkpoint(
130
+ args.checkpoint_path,
131
+ args.pytorch_dump_folder_path,
132
+ args.yaml_config_path,
133
+ args.push_to_hub,
134
+ )
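
Analogously for the vocoder converter, a hedged sketch with hypothetical paths. Note that the argument order differs from the text2mel converter, and that omitting `yaml_config_path` falls back to the default `FastSpeech2ConformerHifiGanConfig`, as in the code above:

```python
from transformers.models.fastspeech2_conformer.convert_hifigan import convert_hifigan_checkpoint

# Hypothetical ESPnet checkpoint path
convert_hifigan_checkpoint(
    checkpoint_path="exp/tts_train_raw_phn_tacotron_g2p_en_no_space/train.total_count.ave.pth",
    pytorch_dump_folder_path="./fastspeech2_conformer_hifigan_hf",
    yaml_config_path=None,  # use the default FastSpeech2ConformerHifiGanConfig
    repo_id=None,
)
```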
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py ADDED
@@ -0,0 +1,102 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert FastSpeech2Conformer checkpoint."""
16
+
17
+ import argparse
18
+
19
+ import torch
20
+
21
+ from transformers import (
22
+ FastSpeech2ConformerConfig,
23
+ FastSpeech2ConformerHifiGan,
24
+ FastSpeech2ConformerHifiGanConfig,
25
+ FastSpeech2ConformerModel,
26
+ FastSpeech2ConformerWithHifiGan,
27
+ FastSpeech2ConformerWithHifiGanConfig,
28
+ logging,
29
+ )
30
+
31
+ from .convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch import (
32
+ convert_espnet_state_dict_to_hf,
33
+ remap_model_yaml_config,
34
+ )
35
+ from .convert_hifigan import load_weights, remap_hifigan_yaml_config
36
+
37
+
38
+ logging.set_verbosity_info()
39
+ logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
40
+
41
+
42
+ def convert_FastSpeech2ConformerWithHifiGan_checkpoint(
43
+ checkpoint_path,
44
+ yaml_config_path,
45
+ pytorch_dump_folder_path,
46
+ repo_id=None,
47
+ ):
48
+ # Prepare the model
49
+ model_params, *_ = remap_model_yaml_config(yaml_config_path)
50
+ model_config = FastSpeech2ConformerConfig(**model_params)
51
+
52
+ model = FastSpeech2ConformerModel(model_config)
53
+
54
+ espnet_checkpoint = torch.load(checkpoint_path)
55
+ hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint)
56
+ model.load_state_dict(hf_compatible_state_dict)
57
+
58
+ # Prepare the vocoder
59
+ config_kwargs = remap_hifigan_yaml_config(yaml_config_path)
60
+ vocoder_config = FastSpeech2ConformerHifiGanConfig(**config_kwargs)
61
+
62
+ vocoder = FastSpeech2ConformerHifiGan(vocoder_config)
63
+ load_weights(espnet_checkpoint, vocoder, vocoder_config)
64
+
65
+ # Prepare the model + vocoder
66
+ config = FastSpeech2ConformerWithHifiGanConfig.from_sub_model_configs(model_config, vocoder_config)
67
+ with_hifigan_model = FastSpeech2ConformerWithHifiGan(config)
68
+ with_hifigan_model.model = model
69
+ with_hifigan_model.vocoder = vocoder
70
+
71
+ with_hifigan_model.save_pretrained(pytorch_dump_folder_path)
72
+
73
+ if repo_id:
74
+ print("Pushing to the hub...")
75
+ with_hifigan_model.push_to_hub(repo_id)
76
+
77
+
78
+ if __name__ == "__main__":
79
+ parser = argparse.ArgumentParser()
80
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
81
+ parser.add_argument(
82
+ "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert"
83
+ )
84
+ parser.add_argument(
85
+ "--pytorch_dump_folder_path",
86
+ required=True,
87
+ default=None,
88
+ type=str,
89
+ help="Path to the output `FastSpeech2ConformerModel` PyTorch model.",
90
+ )
91
+ parser.add_argument(
92
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
93
+ )
94
+
95
+ args = parser.parse_args()
96
+
97
+ convert_FastSpeech2ConformerWithHifiGan_checkpoint(
98
+ args.checkpoint_path,
99
+ args.yaml_config_path,
100
+ args.pytorch_dump_folder_path,
101
+ args.push_to_hub,
102
+ )
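A minimal sketch of loading the combined artifact this script writes and synthesizing a waveform; the dump folder and the tokenizer repo id are assumptions for illustration, not outputs of this diff:

    # Illustrative only -- the local folder and tokenizer repo id below are placeholders.
    import torch
    from transformers import FastSpeech2ConformerTokenizer, FastSpeech2ConformerWithHifiGan

    tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
    model = FastSpeech2ConformerWithHifiGan.from_pretrained("./converted_model_with_hifigan")

    inputs = tokenizer("Hello world.", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    waveform = outputs.waveform  # (batch_size, audio_length), see FastSpeech2ConformerWithHifiGanOutput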
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py ADDED
@@ -0,0 +1,1684 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Espnet authors, IMS Toucan authors, and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch FastSpeech2Conformer model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ from torch import nn
23
+
24
+ from ...modeling_outputs import BaseModelOutput
25
+ from ...modeling_utils import PreTrainedModel
26
+ from ...utils import ModelOutput, add_start_docstrings, logging, replace_return_docstrings
27
+ from .configuration_fastspeech2_conformer import (
28
+ FastSpeech2ConformerConfig,
29
+ FastSpeech2ConformerHifiGanConfig,
30
+ FastSpeech2ConformerWithHifiGanConfig,
31
+ )
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ from ..deprecated._archive_maps import FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
38
+
39
+
40
+ @dataclass
41
+ class FastSpeech2ConformerModelOutput(ModelOutput):
42
+ """
43
+ Output type of [`FastSpeech2ConformerModel`].
44
+
45
+ Args:
46
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
47
+ Spectrogram generation loss.
48
+ spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
49
+ The predicted spectrogram.
50
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
51
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
52
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
53
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
54
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
55
+
56
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
57
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
58
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
59
+ sequence_length)`.
60
+
61
+ Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
62
+ self-attention heads.
63
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
64
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
65
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
66
+
67
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
68
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
69
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
70
+ sequence_length)`.
71
+
72
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
73
+ self-attention heads.
74
+ duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
75
+ Outputs of the duration predictor.
76
+ pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
77
+ Outputs of the pitch predictor.
78
+ energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
79
+ Outputs of the energy predictor.
80
+
81
+ """
82
+
83
+ loss: Optional[torch.FloatTensor] = None
84
+ spectrogram: torch.FloatTensor = None
85
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
86
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
87
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
88
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
89
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
90
+ duration_outputs: torch.LongTensor = None
91
+ pitch_outputs: torch.FloatTensor = None
92
+ energy_outputs: torch.FloatTensor = None
93
+
94
+
95
+ @dataclass
96
+ class FastSpeech2ConformerWithHifiGanOutput(FastSpeech2ConformerModelOutput):
97
+ """
98
+ Output type of [`FastSpeech2ConformerWithHifiGan`].
99
+
100
+ Args:
101
+ waveform (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
102
+ Speech output as a result of passing the predicted mel spectrogram through the vocoder.
103
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
104
+ Spectrogram generation loss.
105
+ spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
106
+ The predicted spectrogram.
107
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
108
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
109
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
110
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
111
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
112
+
113
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
114
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
115
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
116
+ sequence_length)`.
117
+
118
+ Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
119
+ self-attention heads.
120
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
121
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
122
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
123
+
124
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
125
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
126
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
127
+ sequence_length)`.
128
+
129
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
130
+ self-attention heads.
131
+ duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
132
+ Outputs of the duration predictor.
133
+ pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
134
+ Outputs of the pitch predictor.
135
+ energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
136
+ Outputs of the energy predictor.
137
+ """
138
+
139
+ waveform: torch.FloatTensor = None
140
+
141
+
142
+ _CONFIG_FOR_DOC = "FastSpeech2ConformerConfig"
143
+
144
+ FASTSPEECH2_CONFORMER_START_DOCSTRING = r"""
145
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
146
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
147
+ etc.)
148
+
149
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
150
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
151
+ and behavior.
152
+
153
+ Parameters:
154
+ config ([`FastSpeech2ConformerConfig`]):
155
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
156
+ load the weights associated with the model, only the configuration. Check out the
157
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
158
+ """
159
+
160
+
161
+ HIFIGAN_START_DOCSTRING = r"""
162
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
163
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
164
+ etc.)
165
+
166
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
167
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
168
+ and behavior.
169
+
170
+ Parameters:
171
+ config ([`FastSpeech2ConformerConfig`]):
172
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
173
+ load the weights associated with the model, only the configuration. Check out the
174
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
175
+ """
176
+
177
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING = r"""
178
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
179
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
180
+ etc.)
181
+
182
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
183
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
184
+ and behavior.
185
+
186
+ Parameters:
187
+ config ([`FastSpeech2ConformerWithHifiGanConfig`]):
188
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
189
+ load the weights associated with the model, only the configuration. Check out the
190
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
191
+ """
192
+
193
+
194
+ def length_regulator(encoded_embeddings, duration_labels, speaking_speed=1.0):
195
+ """
196
+ Length regulator for feed-forward Transformer.
197
+
198
+ This is the length regulator module described in `FastSpeech: Fast, Robust and Controllable Text to Speech`
199
+ https://arxiv.org/pdf/1905.09263.pdf. The length regulator expands char or phoneme-level embedding features to
200
+ frame-level by repeating each feature based on the corresponding predicted durations.
201
+
202
+ Args:
203
+ encoded_embeddings (`torch.Tensor` of shape `(batch_size, max_text_length, embedding_dim)`):
204
+ Batch of sequences of char or phoneme embeddings.
205
+ duration_labels (`torch.LongTensor` of shape `(batch_size, time)`):
206
+ Batch of durations of each frame.
207
+ speaking_speed (`float`, *optional*, defaults to 1.0):
208
+ Value to control speed of speech.
209
+
210
+ Returns:
211
+ `torch.Tensor`:
212
+ Replicated input tensor based on durations (batch_size, time*, embedding_dim).
213
+ """
214
+
215
+ if speaking_speed <= 0:
216
+ raise ValueError("`speaking_speed` must be greater than 0.")
217
+ elif speaking_speed != 1.0:
218
+ duration_labels = torch.round(duration_labels.float() * speaking_speed).long()
219
+
220
+ if duration_labels.sum() == 0:
221
+ duration_labels[duration_labels.sum(dim=1).eq(0)] = 1
222
+
223
+ # Calculate the maximum length needed
224
+ max_len = torch.sum(duration_labels, dim=1).max()
225
+
226
+ # Create a padded tensor to hold the results
227
+ hidden_states = torch.zeros(
228
+ (encoded_embeddings.size(0), max_len, encoded_embeddings.size(2)),
229
+ dtype=torch.float,
230
+ device=encoded_embeddings.device,
231
+ )
232
+
233
+ # Loop through the batch and fill in the data
234
+ for i, (encoded_embedding, target_duration) in enumerate(zip(encoded_embeddings, duration_labels)):
235
+ repeated = torch.repeat_interleave(encoded_embedding, target_duration, dim=0)
236
+ hidden_states[i, : repeated.size(0)] = repeated
237
+
238
+ return hidden_states
239
+
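As a quick illustration of the expansion `length_regulator` performs (the embedding values and durations below are made up):

    # Hypothetical toy input: one sequence of 3 phoneme embeddings (embedding_dim=2) with durations [2, 1, 3].
    import torch
    from transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer import length_regulator

    encoded = torch.arange(6, dtype=torch.float).reshape(1, 3, 2)
    durations = torch.tensor([[2, 1, 3]])
    frames = length_regulator(encoded, durations)
    print(frames.shape)  # torch.Size([1, 6, 2]) -- each phoneme vector is repeated duration-many times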
240
+
241
+ class FastSpeech2ConformerDurationPredictor(nn.Module):
242
+ """
243
+ Duration predictor module.
244
+
245
+ This is the duration predictor module described in the paper 'FastSpeech: Fast, Robust and Controllable Text to
246
+ Speech' https://arxiv.org/pdf/1905.09263.pdf. The duration predictor predicts the duration of each frame in the log
247
+ domain from the hidden embeddings of the encoder.
248
+
249
+ Note:
250
+ The calculation domain of the outputs differs between `forward` and `inference`. In `forward`, the
251
+ outputs are calculated in the log domain, but in `inference` they are calculated in the linear domain.
252
+
253
+ """
254
+
255
+ def __init__(self, config: FastSpeech2ConformerConfig):
256
+ super().__init__()
257
+
258
+ self.conv_layers = nn.ModuleList()
259
+ self.log_domain_offset = 1.0
260
+
261
+ for layer_idx in range(config.duration_predictor_layers):
262
+ num_chans = config.duration_predictor_channels
263
+ input_channels = config.hidden_size if layer_idx == 0 else num_chans
264
+ layer = FastSpeech2ConformerPredictorLayer(
265
+ input_channels,
266
+ num_chans,
267
+ config.duration_predictor_kernel_size,
268
+ config.duration_predictor_dropout_rate,
269
+ )
270
+ self.conv_layers.append(layer)
271
+ self.linear = nn.Linear(config.duration_predictor_channels, 1)
272
+
273
+ def forward(self, encoder_hidden_states):
274
+ """
275
+ Args:
276
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
277
+ Batch of input sequences.
278
+ padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
279
+ Batch of masks indicating padded part.
280
+
281
+ Returns:
282
+ `torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`.
283
+
284
+ """
285
+ # (batch_size, input_dim, max_text_length)
286
+ hidden_states = encoder_hidden_states.transpose(1, -1)
287
+ for layer in self.conv_layers:
288
+ hidden_states = layer(hidden_states)
289
+
290
+ # NOTE: calculate in log domain, (batch_size, max_text_length)
291
+ hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1)
292
+
293
+ if not self.training:
294
+ # NOTE: calculate in linear domain
295
+ hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long()
296
+
297
+ return hidden_states
298
+
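To make the log-domain note above concrete, a small sketch (values made up) of the mapping the predictor applies in eval mode to turn log-durations back into integer frame counts:

    # Hypothetical predictor outputs in the log domain and their eval-mode linear-domain counterparts.
    import torch

    log_domain_offset = 1.0
    log_durations = torch.tensor([0.0, 1.2, 2.3])
    frame_counts = torch.clamp(torch.round(log_durations.exp() - log_domain_offset), min=0).long()
    print(frame_counts)  # tensor([0, 2, 9])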
299
+
300
+ # Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5BatchNormConvLayer
301
+ class FastSpeech2ConformerBatchNormConvLayer(nn.Module):
302
+ def __init__(self, config, layer_id=0):
303
+ super().__init__()
304
+
305
+ if layer_id == 0:
306
+ in_conv_dim = config.num_mel_bins
307
+ else:
308
+ in_conv_dim = config.speech_decoder_postnet_units
309
+
310
+ if layer_id == config.speech_decoder_postnet_layers - 1:
311
+ out_conv_dim = config.num_mel_bins
312
+ else:
313
+ out_conv_dim = config.speech_decoder_postnet_units
314
+
315
+ self.conv = nn.Conv1d(
316
+ in_conv_dim,
317
+ out_conv_dim,
318
+ kernel_size=config.speech_decoder_postnet_kernel,
319
+ stride=1,
320
+ padding=(config.speech_decoder_postnet_kernel - 1) // 2,
321
+ bias=False,
322
+ )
323
+ self.batch_norm = nn.BatchNorm1d(out_conv_dim)
324
+
325
+ if layer_id < config.speech_decoder_postnet_layers - 1:
326
+ self.activation = nn.Tanh()
327
+ else:
328
+ self.activation = None
329
+
330
+ self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout)
331
+
332
+ def forward(self, hidden_states):
333
+ hidden_states = self.conv(hidden_states)
334
+ hidden_states = self.batch_norm(hidden_states)
335
+ if self.activation is not None:
336
+ hidden_states = self.activation(hidden_states)
337
+ hidden_states = self.dropout(hidden_states)
338
+ return hidden_states
339
+
340
+
341
+ class FastSpeech2ConformerSpeechDecoderPostnet(nn.Module):
342
+ def __init__(self, config):
343
+ super().__init__()
344
+ self.config = config
345
+ self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor)
346
+ self.layers = nn.ModuleList(
347
+ [FastSpeech2ConformerBatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)]
348
+ )
349
+
350
+ def forward(self, hidden_states: torch.Tensor):
351
+ outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins)
352
+ layer_output = outputs_before_postnet.transpose(1, 2)
353
+ for layer in self.layers:
354
+ layer_output = layer(layer_output)
355
+ outputs_after_postnet = outputs_before_postnet + layer_output.transpose(1, 2)
356
+ return outputs_before_postnet, outputs_after_postnet
357
+
358
+
359
+ class FastSpeech2ConformerPredictorLayer(nn.Module):
360
+ def __init__(self, input_channels, num_chans, kernel_size, dropout_rate):
361
+ super().__init__()
362
+ self.conv = nn.Conv1d(
363
+ input_channels,
364
+ num_chans,
365
+ kernel_size,
366
+ stride=1,
367
+ padding=(kernel_size - 1) // 2,
368
+ )
369
+ self.activation = nn.ReLU()
370
+ self.layer_norm = nn.LayerNorm(num_chans)
371
+ self.dropout = nn.Dropout(dropout_rate)
372
+
373
+ def forward(self, hidden_states):
374
+ hidden_states = self.conv(hidden_states)
375
+ hidden_states = self.activation(hidden_states)
376
+
377
+ # Perform layer norm on dimension 1
378
+ hidden_states = hidden_states.transpose(1, -1)
379
+ hidden_states = self.layer_norm(hidden_states)
380
+ hidden_states = hidden_states.transpose(1, -1)
381
+
382
+ hidden_states = self.dropout(hidden_states)
383
+
384
+ return hidden_states
385
+
386
+
387
+ class FastSpeech2ConformerVariancePredictor(nn.Module):
388
+ def __init__(
389
+ self,
390
+ config: FastSpeech2ConformerConfig,
391
+ num_layers=2,
392
+ num_chans=384,
393
+ kernel_size=3,
394
+ dropout_rate=0.5,
395
+ ):
396
+ """
397
+ Initialize the variance predictor module.
398
+
399
+ Args:
400
+ input_dim (`int`): Input dimension.
401
+ num_layers (`int`, *optional*, defaults to 2): Number of convolutional layers.
402
+ num_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers.
403
+ kernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers.
404
+ dropout_rate (`float`, *optional*, defaults to 0.5): Dropout rate.
405
+ """
406
+ super().__init__()
407
+ self.conv_layers = nn.ModuleList()
408
+ for idx in range(num_layers):
409
+ input_channels = config.hidden_size if idx == 0 else num_chans
410
+ layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, kernel_size, dropout_rate)
411
+ self.conv_layers.append(layer)
412
+ self.linear = nn.Linear(num_chans, 1)
413
+
414
+ def forward(self, encoder_hidden_states, padding_masks=None):
415
+ """
416
+ Calculate forward propagation.
417
+
418
+ Args:
419
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
420
+ Batch of input sequences.
421
+ padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
422
+ Batch of masks indicating padded part.
423
+
424
+ Returns:
425
+ Tensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`.
426
+ """
427
+ # (batch_size, input_dim, max_text_length)
428
+ hidden_states = encoder_hidden_states.transpose(1, -1)
429
+ for layer in self.conv_layers:
430
+ hidden_states = layer(hidden_states)
431
+
432
+ hidden_states = self.linear(hidden_states.transpose(1, 2))
433
+
434
+ if padding_masks is not None:
435
+ hidden_states = hidden_states.masked_fill(padding_masks, 0.0)
436
+
437
+ return hidden_states
438
+
439
+
440
+ class FastSpeech2ConformerVarianceEmbedding(nn.Module):
441
+ def __init__(
442
+ self,
443
+ in_channels=1,
444
+ out_channels=384,
445
+ kernel_size=1,
446
+ padding=0,
447
+ dropout_rate=0.0,
448
+ ):
449
+ super().__init__()
450
+ self.conv = nn.Conv1d(
451
+ in_channels=in_channels,
452
+ out_channels=out_channels,
453
+ kernel_size=kernel_size,
454
+ padding=padding,
455
+ )
456
+ self.dropout = nn.Dropout(dropout_rate)
457
+
458
+ def forward(self, hidden_states):
459
+ hidden_states = hidden_states.transpose(1, 2)
460
+ hidden_states = self.conv(hidden_states)
461
+ hidden_states = self.dropout(hidden_states)
462
+ hidden_states = hidden_states.transpose(1, 2)
463
+ return hidden_states
464
+
465
+
466
+ class FastSpeech2ConformerAttention(nn.Module):
467
+ """
468
+ Multi-Head attention layer with relative position encoding. Details can be found in
469
+ https://github.com/espnet/espnet/pull/2816. Paper: https://arxiv.org/abs/1901.02860.
470
+ """
471
+
472
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
473
+ """Construct an FastSpeech2ConformerAttention object."""
474
+ super().__init__()
475
+ # We assume d_v always equals dim_key
476
+ self.num_heads = module_config["num_attention_heads"]
477
+ self.hidden_size = config.hidden_size
478
+ self.dim_key = self.hidden_size // self.num_heads
479
+ self.head_dim = self.hidden_size // self.num_heads
480
+ self.linear_q = nn.Linear(self.hidden_size, self.hidden_size)
481
+ self.linear_k = nn.Linear(self.hidden_size, self.hidden_size)
482
+ self.linear_v = nn.Linear(self.hidden_size, self.hidden_size)
483
+ self.linear_out = nn.Linear(self.hidden_size, self.hidden_size)
484
+ self.dropout = nn.Dropout(p=module_config["attention_dropout_rate"])
485
+
486
+ # linear transformation for positional encoding
487
+ self.linear_pos = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
488
+ # these two learnable bias are used in matrix c and matrix d
489
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
490
+ self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
491
+ self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
492
+
493
+ def shift_relative_position_tensor(self, pos_tensor):
494
+ """
495
+ Args:
496
+ pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.
497
+ """
498
+ zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype)
499
+ pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1)
500
+
501
+ pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2))
502
+ # only keep the positions from 0 to time2
503
+ pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, : pos_tensor.size(-1) // 2 + 1]
504
+
505
+ return pos_tensor
506
+
507
+ def forward(
508
+ self,
509
+ hidden_states: torch.Tensor,
510
+ attention_mask: Optional[torch.Tensor] = None,
511
+ pos_emb: Optional[torch.Tensor] = None,
512
+ output_attentions: Optional[torch.Tensor] = False,
513
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
514
+ """
515
+ Compute 'Scaled Dot Product Attention' with rel. positional encoding.
516
+
517
+ Args:
518
+ hidden_states (`torch.Tensor` of shape `(batch, time2, size)`): Values of the hidden states
519
+ attention_mask (`torch.Tensor` of shape `(batch, time1, time2)`): Mask tensor.
520
+ pos_emb (`torch.Tensor` of shape `(batch, 2*time1-1, size)`): Positional embedding tensor.
521
+ output_attentions (`bool`, *optional*):
522
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
523
+ returned tensors for more detail.
524
+ Returns:
525
+ `torch.Tensor`: Output tensor of shape `(batch, time1, d_model)`.
526
+ """
527
+ bsz, q_len, _ = hidden_states.size()
528
+ query_states = self.linear_q(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
529
+ key_states = self.linear_k(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
530
+ value_states = self.linear_v(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
531
+
532
+ bsz_pos = pos_emb.size(0)
533
+ pos_encoding = self.linear_pos(pos_emb).view(bsz_pos, -1, self.num_heads, self.head_dim)
534
+
535
+ # (batch_size, head, time1, dim_key)
536
+ query_with_bias_u = (query_states + self.pos_bias_u).transpose(1, 2)
537
+ # (batch_size, head, time1, dim_key)
538
+ query_with_bias_v = (query_states + self.pos_bias_v).transpose(1, 2)
539
+
540
+ # compute attention score
541
+ # first compute matrix a and matrix c
542
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
543
+ # (batch_size, head, time1, time2)
544
+ matrix_ac = torch.matmul(query_with_bias_u, key_states.permute(0, 2, 3, 1))
545
+
546
+ # compute matrix b and matrix d
547
+ # (batch_size, head, time1, 2*time1-1)
548
+ matrix_bd = torch.matmul(query_with_bias_v, pos_encoding.permute(0, 2, 3, 1))
549
+ matrix_bd = self.shift_relative_position_tensor(matrix_bd)
550
+
551
+ # (batch_size, head, time1, time2)
552
+ scores = (matrix_ac + matrix_bd) / math.sqrt(self.dim_key)
553
+
554
+ # Forward attention
555
+ if attention_mask is not None:
556
+ expected_size = (bsz, 1, q_len)
557
+ if attention_mask.size() != expected_size:
558
+ raise ValueError(f"Attention mask should be of size {expected_size}, but is {attention_mask.size()}")
559
+ attention_mask = attention_mask.unsqueeze(1).eq(0)
560
+ min_value = float(torch.finfo(scores.dtype).min)
561
+ scores = scores.masked_fill(attention_mask, min_value)
562
+ attn_weights = torch.softmax(scores, dim=-1).masked_fill(attention_mask, 0.0)
563
+ else:
564
+ attn_weights = torch.softmax(scores, dim=-1)
565
+
566
+ attn_weights = self.dropout(attn_weights)
567
+ attn_output = torch.matmul(attn_weights, value_states.transpose(1, 2))
568
+ attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1)
569
+
570
+ attn_output = self.linear_out(attn_output)
571
+
572
+ if not output_attentions:
573
+ attn_weights = None
574
+
575
+ return attn_output, attn_weights
576
+
577
+
578
+ class FastSpeech2ConformerConvolutionModule(nn.Module):
579
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
580
+ super().__init__()
581
+ # kernel_size should be an odd number for 'SAME' padding
582
+ channels = config.hidden_size
583
+ kernel_size = module_config["kernel_size"]
584
+ self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=True)
585
+ self.depthwise_conv = nn.Conv1d(
586
+ channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=True
587
+ )
588
+ self.norm = nn.BatchNorm1d(channels)
589
+ self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=True)
590
+
591
+ def forward(self, hidden_states):
592
+ """
593
+ Compute convolution module.
594
+
595
+ Args:
596
+ hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.
597
+
598
+ Returns:
599
+ `torch.Tensor`: Output tensor of shape `(batch, time, channels)`.
600
+
601
+ """
602
+ # exchange the temporal dimension and the feature dimension
603
+ hidden_states = hidden_states.transpose(1, 2)
604
+
605
+ # GLU mechanism, (batch_size, 2*channel, dim)
606
+ hidden_states = self.pointwise_conv1(hidden_states)
607
+ # (batch_size, channel, dim)
608
+ hidden_states = nn.functional.glu(hidden_states, dim=1)
609
+
610
+ # 1D Depthwise Conv
611
+ hidden_states = self.depthwise_conv(hidden_states)
612
+ hidden_states = self.norm(hidden_states)
613
+
614
+ hidden_states = hidden_states * torch.sigmoid(hidden_states)
615
+
616
+ hidden_states = self.pointwise_conv2(hidden_states)
617
+
618
+ return hidden_states.transpose(1, 2)
619
+
620
+
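The GLU step in the convolution module halves the channel dimension that `pointwise_conv1` doubled; a minimal shape check of that gating step (shapes chosen arbitrarily for illustration):

    # Arbitrary shapes: (batch, 2*channels, time) in, (batch, channels, time) out.
    import torch
    from torch import nn

    doubled = torch.randn(2, 2 * 384, 50)
    gated = nn.functional.glu(doubled, dim=1)  # split dim 1 in half, gate one half with sigmoid of the other
    print(gated.shape)  # torch.Size([2, 384, 50])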
621
+ class FastSpeech2ConformerEncoderLayer(nn.Module):
622
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
623
+ super().__init__()
624
+
625
+ # self-attention module definition
626
+ self.self_attn = FastSpeech2ConformerAttention(config, module_config)
627
+
628
+ # feed-forward module definition
629
+ self.feed_forward = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)
630
+
631
+ self.macaron_style = config.use_macaron_style_in_conformer
632
+ if self.macaron_style:
633
+ self.feed_forward_macaron = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)
634
+ self.ff_macaron_layer_norm = nn.LayerNorm(config.hidden_size)
635
+ self.ff_scale = 0.5
636
+ else:
637
+ self.ff_scale = 1.0
638
+
639
+ # convolution module definition
640
+ self.use_cnn_module = config.use_cnn_in_conformer
641
+ if self.use_cnn_module:
642
+ self.conv_module = FastSpeech2ConformerConvolutionModule(config, module_config)
643
+ self.conv_layer_norm = nn.LayerNorm(config.hidden_size)
644
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size)
645
+
646
+ self.ff_layer_norm = nn.LayerNorm(config.hidden_size)
647
+
648
+ self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)
649
+
650
+ self.dropout = nn.Dropout(module_config["dropout_rate"])
651
+ self.size = config.hidden_size
652
+ self.normalize_before = module_config["normalize_before"]
653
+ self.concat_after = module_config["concat_after"]
654
+ if self.concat_after:
655
+ self.concat_linear = nn.Linear(config.hidden_size + config.hidden_size, config.hidden_size)
656
+
657
+ def forward(
658
+ self,
659
+ hidden_states: torch.Tensor,
660
+ pos_emb: Optional[torch.Tensor] = None,
661
+ attention_mask: Optional[torch.Tensor] = None,
662
+ output_attentions: Optional[torch.Tensor] = False,
663
+ ):
664
+ """
665
+ Compute encoded features.
666
+
667
+ Args:
668
+ hidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor.
669
+ pos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor.
670
+ attention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input.
671
+ output_attentions (`bool`, *optional*):
672
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
673
+ returned tensors for more detail.
674
+ Returns:
675
+ `torch.Tensor`: Output tensor of shape `(batch, time, size)`.
676
+
677
+ """
678
+ # whether to use macaron style
679
+ if self.macaron_style:
680
+ residual = hidden_states
681
+ if self.normalize_before:
682
+ hidden_states = self.ff_macaron_layer_norm(hidden_states)
683
+ hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states))
684
+ if not self.normalize_before:
685
+ hidden_states = self.ff_macaron_layer_norm(hidden_states)
686
+
687
+ # multi-headed self-attention module
688
+ residual = hidden_states
689
+ if self.normalize_before:
690
+ hidden_states = self.self_attn_layer_norm(hidden_states)
691
+
692
+ attention_output, attention_scores = self.self_attn(
693
+ hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions
694
+ )
695
+
696
+ if self.concat_after:
697
+ x_concat = torch.cat((hidden_states, attention_output), dim=-1)
698
+ hidden_states = self.concat_linear(x_concat)
699
+ hidden_states = residual + hidden_states
700
+ else:
701
+ hidden_states = self.dropout(attention_output)
702
+ hidden_states = residual + hidden_states
703
+ if not self.normalize_before:
704
+ hidden_states = self.self_attn_layer_norm(hidden_states)
705
+
706
+ # convolution module
707
+ if self.use_cnn_module:
708
+ residual = hidden_states
709
+ if self.normalize_before:
710
+ hidden_states = self.conv_layer_norm(hidden_states)
711
+ hidden_states = self.conv_module(hidden_states)
712
+ hidden_states = self.dropout(hidden_states)
713
+ hidden_states = residual + hidden_states
714
+ if not self.normalize_before:
715
+ hidden_states = self.conv_layer_norm(hidden_states)
716
+
717
+ # feed forward module
718
+ residual = hidden_states
719
+ if self.normalize_before:
720
+ hidden_states = self.ff_layer_norm(hidden_states)
721
+ hidden_states = self.feed_forward(hidden_states)
722
+ hidden_states = self.dropout(hidden_states)
723
+ hidden_states = residual + self.ff_scale * hidden_states
724
+ if not self.normalize_before:
725
+ hidden_states = self.ff_layer_norm(hidden_states)
726
+
727
+ if self.conv_module is not None:
728
+ hidden_states = self.final_layer_norm(hidden_states)
729
+
730
+ outputs = (hidden_states,)
731
+
732
+ if output_attentions:
733
+ outputs += (attention_scores,)
734
+
735
+ return outputs
736
+
737
+
738
+ class FastSpeech2ConformerMultiLayeredConv1d(nn.Module):
739
+ """
740
+ Multi-layered conv1d for Transformer block.
741
+
742
+ This is a module of multi-layered conv1d designed to replace positionwise feed-forward network in Transformer
743
+ block, which is introduced in 'FastSpeech: Fast, Robust and Controllable Text to Speech'
744
+ https://arxiv.org/pdf/1905.09263.pdf
745
+ """
746
+
747
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
748
+ """
749
+ Initialize FastSpeech2ConformerMultiLayeredConv1d module.
750
+
751
+ Args:
752
+ input_channels (`int`): Number of input channels.
753
+ hidden_channels (`int`): Number of hidden channels.
754
+ kernel_size (`int`): Kernel size of conv1d.
755
+ dropout_rate (`float`): Dropout rate.
756
+ """
757
+ super().__init__()
758
+ input_channels = config.hidden_size
759
+ hidden_channels = module_config["linear_units"]
760
+ kernel_size = config.positionwise_conv_kernel_size
761
+ self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
762
+ self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
763
+ self.dropout = nn.Dropout(module_config["dropout_rate"])
764
+
765
+ def forward(self, hidden_states):
766
+ """
767
+ Calculate forward propagation.
768
+
769
+ Args:
770
+ hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).
771
+
772
+ Returns:
773
+ torch.Tensor: Batch of output tensors (batch_size, time, hidden_channels).
774
+ """
775
+ hidden_states = hidden_states.transpose(-1, 1)
776
+ hidden_states = self.conv1(hidden_states)
777
+ hidden_states = torch.relu(hidden_states)
778
+ hidden_states = self.dropout(hidden_states)
779
+ hidden_states = self.conv2(hidden_states)
780
+ hidden_states = hidden_states.transpose(-1, 1)
781
+ return hidden_states
782
+
783
+
784
+ class FastSpeech2ConformerRelPositionalEncoding(nn.Module):
785
+ """
786
+ Relative positional encoding module (new implementation). Details can be found in
787
+ https://github.com/espnet/espnet/pull/2816. See: Appendix B in https://arxiv.org/abs/1901.02860.
788
+
789
+ Args:
790
+ config (`FastSpeech2ConformerConfig`):
791
+ FastSpeech2ConformerConfig instance.
792
+ module_config (`dict`):
+ Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
793
+ """
794
+
795
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
796
+ """
797
+ Construct a PositionalEncoding object.
798
+ """
799
+ super().__init__()
800
+ self.embed_dim = config.hidden_size
801
+ self.input_scale = math.sqrt(self.embed_dim)
802
+ self.dropout = nn.Dropout(p=module_config["positional_dropout_rate"])
803
+ self.pos_enc = None
804
+ self.max_len = 5000
805
+ self.extend_pos_enc(torch.tensor(0.0).expand(1, self.max_len))
806
+
807
+ def extend_pos_enc(self, x):
808
+ """Reset the positional encodings."""
809
+ if self.pos_enc is not None:
810
+ # self.pos_enc contains both positive and negative parts
811
+ # the length of self.pos_enc is 2 * input_len - 1
812
+ if self.pos_enc.size(1) >= x.size(1) * 2 - 1:
813
+ if self.pos_enc.dtype != x.dtype or self.pos_enc.device != x.device:
814
+ self.pos_enc = self.pos_enc.to(dtype=x.dtype, device=x.device)
815
+ return
816
+ # Suppose `i` is the position of the query vector and `j` is the
817
+ # position of the key vector. We use positive relative positions when keys
818
+ # are to the left (i>j) and negative relative positions otherwise (i<j).
819
+ pos_enc_positive = torch.zeros(x.size(1), self.embed_dim)
820
+ pos_enc_negative = torch.zeros(x.size(1), self.embed_dim)
821
+ position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1)
822
+ div_term = torch.exp(
823
+ torch.arange(0, self.embed_dim, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.embed_dim)
824
+ )
825
+ pos_enc_positive[:, 0::2] = torch.sin(position * div_term)
826
+ pos_enc_positive[:, 1::2] = torch.cos(position * div_term)
827
+ pos_enc_negative[:, 0::2] = torch.sin(-1 * position * div_term)
828
+ pos_enc_negative[:, 1::2] = torch.cos(-1 * position * div_term)
829
+
830
+ # Reverse the order of positive indices and concat both positive and
831
+ # negative indices. This is used to support the shifting trick
832
+ # as in https://arxiv.org/abs/1901.02860
833
+ pos_enc_positive = torch.flip(pos_enc_positive, [0]).unsqueeze(0)
834
+ pos_enc_negative = pos_enc_negative[1:].unsqueeze(0)
835
+ pos_enc = torch.cat([pos_enc_positive, pos_enc_negative], dim=1)
836
+ self.pos_enc = pos_enc.to(device=x.device, dtype=x.dtype)
837
+
838
+ def forward(self, feature_representation):
839
+ """
840
+ Args:
841
+ feature_representation (`torch.Tensor` of shape (batch_size, time, `*`)):
842
+ Input tensor.
843
+
844
+ Returns:
845
+ `torch.Tensor`: Encoded tensor (batch_size, time, `*`).
846
+ """
847
+ self.extend_pos_enc(feature_representation)
848
+ hidden_states = feature_representation * self.input_scale
849
+ center_idx = self.pos_enc.size(1) // 2
850
+ pos_emb = self.pos_enc[:, center_idx - hidden_states.size(1) + 1 : center_idx + hidden_states.size(1)]
851
+ return self.dropout(hidden_states), self.dropout(pos_emb)
852
+
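For intuition, a minimal standalone sketch of the sinusoidal table `extend_pos_enc` builds for one direction (embed_dim and length are arbitrary); the module computes this for both positive and negative offsets and concatenates them into 2*time-1 rows:

    # Arbitrary sizes, for illustration only.
    import math
    import torch

    embed_dim, time = 8, 4
    position = torch.arange(time, dtype=torch.float).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, embed_dim, 2, dtype=torch.float) * -(math.log(10000.0) / embed_dim))

    pos_enc = torch.zeros(time, embed_dim)
    pos_enc[:, 0::2] = torch.sin(position * div_term)
    pos_enc[:, 1::2] = torch.cos(position * div_term)
    print(pos_enc.shape)  # torch.Size([4, 8])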
853
+
854
+ class FastSpeech2ConformerEncoder(nn.Module):
855
+ """
856
+ FastSpeech2ConformerEncoder encoder module.
857
+
858
+ Args:
859
+ config (`FastSpeech2ConformerConfig`):
860
+ FastSpeech2ConformerConfig instance.
861
+ module_config (`dict`):
862
+ Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
863
+ use_encoder_input_layer (`bool`, *optional*, defaults to `False`):
864
+ Whether to use an embedding layer that maps input token ids to hidden states.
865
+ """
866
+
867
+ def __init__(
868
+ self,
869
+ config: FastSpeech2ConformerConfig,
870
+ module_config,
871
+ use_encoder_input_layer=False,
872
+ ):
873
+ super().__init__()
874
+
875
+ self.embed = None
876
+ if use_encoder_input_layer:
877
+ self.embed = nn.Embedding(
878
+ num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, padding_idx=0
879
+ )
880
+
881
+ self.pos_enc = FastSpeech2ConformerRelPositionalEncoding(config, module_config)
882
+
883
+ self.conformer_layers = nn.ModuleList(
884
+ [FastSpeech2ConformerEncoderLayer(config, module_config) for _ in range(module_config["layers"])]
885
+ )
886
+
887
+ def forward(
888
+ self,
889
+ input_tensor: torch.LongTensor,
890
+ attention_mask: Optional[bool] = None,
891
+ output_hidden_states: Optional[bool] = None,
892
+ output_attentions: Optional[bool] = False,
893
+ return_dict: Optional[bool] = None,
894
+ ):
895
+ """
896
+ Args:
897
+ input_tensor (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
898
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
899
+ provide it.
900
+
901
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
902
+ [`PreTrainedTokenizer.__call__`] for details.
903
+
904
+ [What are input IDs?](../glossary#input-ids)
905
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
906
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
907
+
908
+ - 1 for tokens that are **not masked**,
909
+ - 0 for tokens that are **masked**.
910
+
911
+ [What are attention masks?](../glossary#attention-mask)
912
+ output_hidden_states (`bool`, *optional*):
913
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
914
+ for more detail.
915
+ output_attentions (`bool`, *optional*):
916
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
917
+ returned tensors for more detail.
918
+ return_dict (`bool`, *optional*):
919
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
920
+ Returns:
921
+ `torch.Tensor`:
922
+ Output tensor of shape `(batch, time, attention_dim)`.
923
+ """
924
+ feature_representation = input_tensor
925
+ if self.embed is not None:
926
+ feature_representation = self.embed(feature_representation)
927
+
928
+ hidden_states, pos_emb = self.pos_enc(feature_representation)
929
+
930
+ all_hidden_states = () if output_hidden_states else None
931
+ all_self_attentions = () if output_attentions else None
932
+
933
+ for conformer_layer in self.conformer_layers:
934
+ if output_hidden_states:
935
+ all_hidden_states = all_hidden_states + (hidden_states,)
936
+
937
+ layer_outputs = conformer_layer(hidden_states, pos_emb, attention_mask, output_attentions)
938
+ hidden_states = layer_outputs[0]
939
+
940
+ if output_attentions:
941
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
942
+
943
+ # Add last layer
944
+ if output_hidden_states:
945
+ all_hidden_states = all_hidden_states + (hidden_states,)
946
+
947
+ if not return_dict:
948
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
949
+ return BaseModelOutput(
950
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
951
+ )
952
+
953
+
954
+ class FastSpeech2ConformerLoss(nn.Module):
955
+ def __init__(self, config: FastSpeech2ConformerConfig):
956
+ super().__init__()
957
+
958
+ use_masking = config.use_masking
959
+ use_weighted_masking = config.use_weighted_masking
960
+
961
+ if use_masking and use_weighted_masking:
962
+ raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.")
963
+
964
+ self.use_masking = use_masking
965
+ self.use_weighted_masking = use_weighted_masking
966
+
967
+ # define criterions
968
+ reduction = "none" if self.use_weighted_masking else "mean"
969
+ self.l1_criterion = nn.L1Loss(reduction=reduction)
970
+ self.mse_criterion = nn.MSELoss(reduction=reduction)
971
+ self.duration_criterion = nn.MSELoss(reduction=reduction)
972
+ self.log_domain_offset = 1.0
973
+
974
+ def forward(
975
+ self,
976
+ outputs_after_postnet,
977
+ outputs_before_postnet,
978
+ duration_outputs,
979
+ pitch_outputs,
980
+ energy_outputs,
981
+ spectrogram_labels,
982
+ duration_labels,
983
+ pitch_labels,
984
+ energy_labels,
985
+ duration_mask,
986
+ spectrogram_mask,
987
+ ):
988
+ """
989
+ Args:
990
+ outputs_after_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
991
+ Batch of outputs after postnet.
992
+ outputs_before_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
993
+ Batch of outputs before postnet.
994
+ duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length)`):
995
+ Batch of outputs of duration predictor.
996
+ pitch_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
997
+ Batch of outputs of pitch predictor.
998
+ energy_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
999
+ Batch of outputs of energy predictor.
1000
+ spectrogram_labels (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
1001
+ Batch of target features.
1002
+ duration_labels (`torch.LongTensor` of shape `(batch_size, max_text_length)`): Batch of durations.
1003
+ pitch_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
1004
+ Batch of target token-averaged pitch.
1005
+ energy_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
1006
+ Batch of target token-averaged energy.
1007
+ duration_mask (`torch.LongTensor`):
1008
+ Mask used to discern which values the duration loss should be calculated for.
1009
+ spectrogram_mask (`torch.LongTensor`):
1010
+ Mask used to discern which values the spectrogram loss should be calculated for.
1011
+
1012
+ Returns:
1013
+ `tuple(torch.FloatTensor)`: Tuple of tensors containing, in order, the L1 loss value, duration predictor
1014
+ loss value, pitch predictor loss value, and energy predictor loss value.
1015
+
1016
+ """
1017
+ pitch_and_energy_masks = duration_mask.unsqueeze(-1)
1018
+
1019
+ # apply mask to remove padded part
1020
+ if self.use_masking:
1021
+ outputs_before_postnet = outputs_before_postnet.masked_select(spectrogram_mask)
1022
+ if outputs_after_postnet is not None:
1023
+ outputs_after_postnet = outputs_after_postnet.masked_select(spectrogram_mask)
1024
+ spectrogram_labels = spectrogram_labels.masked_select(spectrogram_mask)
1025
+ duration_outputs = duration_outputs.masked_select(duration_mask)
1026
+ duration_labels = duration_labels.masked_select(duration_mask)
1027
+ pitch_outputs = pitch_outputs.masked_select(pitch_and_energy_masks)
1028
+ energy_outputs = energy_outputs.masked_select(pitch_and_energy_masks)
1029
+ pitch_labels = pitch_labels.masked_select(pitch_and_energy_masks)
1030
+ energy_labels = energy_labels.masked_select(pitch_and_energy_masks)
1031
+
1032
+ # calculate loss
1033
+ l1_loss = self.l1_criterion(outputs_before_postnet, spectrogram_labels)
1034
+ if outputs_after_postnet is not None:
1035
+ l1_loss = l1_loss + self.l1_criterion(outputs_after_postnet, spectrogram_labels)
1036
+ duration_labels = torch.log(duration_labels.float() + self.log_domain_offset)
1037
+ duration_loss = self.duration_criterion(duration_outputs, duration_labels)
1038
+ pitch_loss = self.mse_criterion(pitch_outputs, pitch_labels)
1039
+ energy_loss = self.mse_criterion(energy_outputs, energy_labels)
1040
+
1041
+ # make weighted mask and apply it
1042
+ if self.use_weighted_masking:
1043
+ spectrogram_mask = nn.functional.pad(
1044
+ spectrogram_mask.transpose(1, 2),
1045
+ [0, spectrogram_labels.size(1) - spectrogram_mask.size(1), 0, 0, 0, 0],
1046
+ value=False,
1047
+ ).transpose(1, 2)
1048
+
1049
+ out_weights = spectrogram_mask.float() / spectrogram_mask.sum(dim=1, keepdim=True).float()
1050
+ out_weights /= spectrogram_labels.size(0) * spectrogram_labels.size(2)
1051
+ duration_weights = duration_mask.float() / duration_mask.sum(dim=1, keepdim=True).float()
1052
+ duration_weights /= duration_labels.size(0)
1053
+
1054
+ # apply weight
1055
+ l1_loss = l1_loss.mul(out_weights).masked_select(spectrogram_mask).sum()
1056
+ duration_loss = duration_loss.mul(duration_weights).masked_select(duration_mask).sum()
1057
+ pitch_weights = duration_weights.unsqueeze(-1)
1058
+ pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum()
1059
+ energy_loss = energy_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum()
1060
+
1061
+ return l1_loss + duration_loss + pitch_loss + energy_loss
1062
+
1063
+
1064
+ class FastSpeech2ConformerPreTrainedModel(PreTrainedModel):
1065
+ """
1066
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1067
+ models.
1068
+ """
1069
+
1070
+ config_class = FastSpeech2ConformerConfig
1071
+ base_model_prefix = "fastspeech2_conformer"
1072
+
1073
+ main_input_name = "input_ids"
1074
+
1075
+ def _init_weights(self, module):
1076
+ """Initialize the weights"""
1077
+ if isinstance(module, (nn.LayerNorm)):
1078
+ module.bias.data.zero_()
1079
+ module.weight.data.fill_(1.0)
1080
+ elif isinstance(module, nn.Conv1d):
1081
+ nn.init.kaiming_normal_(module.weight)
1082
+ if module.bias is not None:
1083
+ key = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
1084
+ nn.init.uniform_(module.bias, a=-key, b=key)
1085
+ elif isinstance(module, nn.Embedding):
1086
+ module.weight.data.normal_()
1087
+ if module.padding_idx is not None:
1088
+ module.weight.data[module.padding_idx].zero_()
1089
+ elif isinstance(module, FastSpeech2ConformerAttention):
1090
+ nn.init.xavier_uniform_(module.pos_bias_u)
1091
+ nn.init.xavier_uniform_(module.pos_bias_v)
1092
+
1093
+ def _set_gradient_checkpointing(self, module, value=False):
1094
+ if isinstance(module, FastSpeech2ConformerEncoder):
1095
+ module.gradient_checkpointing = value
1096
+
1097
+
1098
+ @add_start_docstrings(
1099
+ """FastSpeech2Conformer Model.""",
1100
+ FASTSPEECH2_CONFORMER_START_DOCSTRING,
1101
+ )
1102
+ class FastSpeech2ConformerModel(FastSpeech2ConformerPreTrainedModel):
1103
+ """
1104
+ FastSpeech 2 module.
1105
+
1106
+ This is a module of FastSpeech 2 described in 'FastSpeech 2: Fast and High-Quality End-to-End Text to Speech'
1107
+ https://arxiv.org/abs/2006.04558. Instead of quantized pitch and energy, we use token-averaged value introduced in
1108
+ FastPitch: Parallel Text-to-speech with Pitch Prediction. The encoder and decoder are Conformers instead of regular
1109
+ Transformers.
1110
+ """
1111
+
1112
+ def __init__(self, config: FastSpeech2ConformerConfig):
1113
+ super().__init__(config)
1114
+ self.config = config
1115
+
1116
+ # store hyperparameters
1117
+ self.vocab_size = config.vocab_size
1118
+ self.num_mel_bins = config.num_mel_bins
1119
+ self.hidden_size = config.hidden_size
1120
+ self.reduction_factor = config.reduction_factor
1121
+ self.stop_gradient_from_pitch_predictor = config.stop_gradient_from_pitch_predictor
1122
+ self.stop_gradient_from_energy_predictor = config.stop_gradient_from_energy_predictor
1123
+
1124
+ self.multilingual_model = config.num_languages is not None and config.num_languages > 1
1125
+ if self.multilingual_model:
1126
+ self.language_id_embedding = torch.nn.Embedding(config.num_languages, self.hidden_size)
1127
+
1128
+ self.multispeaker_model = config.num_speakers is not None and config.num_speakers > 1
1129
+ if self.multispeaker_model:
1130
+ self.speaker_id_embedding = torch.nn.Embedding(config.num_speakers, config.hidden_size)
1131
+
1132
+ self.speaker_embed_dim = config.speaker_embed_dim
1133
+ if self.speaker_embed_dim:
1134
+ self.projection = nn.Linear(config.hidden_size + self.speaker_embed_dim, config.hidden_size)
1135
+
1136
+ self.encoder = FastSpeech2ConformerEncoder(config, config.encoder_config, use_encoder_input_layer=True)
1137
+
1138
+ self.duration_predictor = FastSpeech2ConformerDurationPredictor(config)
1139
+
1140
+ self.pitch_predictor = FastSpeech2ConformerVariancePredictor(
1141
+ config,
1142
+ num_layers=config.pitch_predictor_layers,
1143
+ num_chans=config.pitch_predictor_channels,
1144
+ kernel_size=config.pitch_predictor_kernel_size,
1145
+ dropout_rate=config.pitch_predictor_dropout,
1146
+ )
1147
+ # continuous pitch + FastPitch style avg
1148
+ self.pitch_embed = FastSpeech2ConformerVarianceEmbedding(
1149
+ out_channels=self.hidden_size,
1150
+ kernel_size=config.pitch_embed_kernel_size,
1151
+ padding=(config.pitch_embed_kernel_size - 1) // 2,
1152
+ dropout_rate=config.pitch_embed_dropout,
1153
+ )
1154
+
1155
+ self.energy_predictor = FastSpeech2ConformerVariancePredictor(
1156
+ config,
1157
+ num_layers=config.energy_predictor_layers,
1158
+ num_chans=config.energy_predictor_channels,
1159
+ kernel_size=config.energy_predictor_kernel_size,
1160
+ dropout_rate=config.energy_predictor_dropout,
1161
+ )
1162
+ # continuous energy + FastPitch style avg
1163
+ self.energy_embed = FastSpeech2ConformerVarianceEmbedding(
1164
+ out_channels=self.hidden_size,
1165
+ kernel_size=config.energy_embed_kernel_size,
1166
+ padding=(config.energy_embed_kernel_size - 1) // 2,
1167
+ dropout_rate=config.energy_embed_dropout,
1168
+ )
1169
+
1170
+ # The decoder is an encoder
1171
+ self.decoder = FastSpeech2ConformerEncoder(config, config.decoder_config, use_encoder_input_layer=False)
1172
+
1173
+ self.speech_decoder_postnet = FastSpeech2ConformerSpeechDecoderPostnet(config)
1174
+
1175
+ self.criterion = FastSpeech2ConformerLoss(config)
1176
+
1177
+ self.post_init()
1178
+
1179
+ @replace_return_docstrings(output_type=FastSpeech2ConformerModelOutput, config_class=_CONFIG_FOR_DOC)
1180
+ def forward(
1181
+ self,
1182
+ input_ids: torch.LongTensor,
1183
+ attention_mask: Optional[torch.LongTensor] = None,
1184
+ spectrogram_labels: Optional[torch.FloatTensor] = None,
1185
+ duration_labels: Optional[torch.LongTensor] = None,
1186
+ pitch_labels: Optional[torch.FloatTensor] = None,
1187
+ energy_labels: Optional[torch.FloatTensor] = None,
1188
+ speaker_ids: Optional[torch.LongTensor] = None,
1189
+ lang_ids: Optional[torch.LongTensor] = None,
1190
+ speaker_embedding: Optional[torch.FloatTensor] = None,
1191
+ return_dict: Optional[bool] = None,
1192
+ output_attentions: Optional[bool] = None,
1193
+ output_hidden_states: Optional[bool] = None,
1194
+ ) -> Union[Tuple, FastSpeech2ConformerModelOutput]:
1195
+ """
1196
+ Args:
1197
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1198
+ Input sequence of text vectors.
1199
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`):
1200
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
1201
+ `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**.
1202
+ spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
1203
+ Batch of padded target features.
1204
+ duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
1205
+ Batch of padded durations.
1206
+ pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
1207
+ Batch of padded token-averaged pitch.
1208
+ energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
1209
+ Batch of padded token-averaged energy.
1210
+ speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
1211
+ Speaker ids used to condition features of speech output by the model.
1212
+ lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
1213
+ Language ids used to condition features of speech output by the model.
1214
+ speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
1215
+ Embedding containing conditioning signals for the features of the speech.
1216
+ return_dict (`bool`, *optional*, defaults to `None`):
1217
+ Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple.
1218
+ output_attentions (`bool`, *optional*, defaults to `None`):
1219
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1220
+ returned tensors for more detail.
1221
+ output_hidden_states (`bool`, *optional*, defaults to `None`):
1222
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1223
+ for more detail.
1224
+
1225
+ Returns:
1226
+
1227
+ Example:
1228
+
1229
+ ```python
1230
+ >>> from transformers import (
1231
+ ... FastSpeech2ConformerTokenizer,
1232
+ ... FastSpeech2ConformerModel,
1233
+ ... FastSpeech2ConformerHifiGan,
1234
+ ... )
1235
+
1236
+ >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
1237
+ >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
1238
+ >>> input_ids = inputs["input_ids"]
1239
+
1240
+ >>> model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
1241
+ >>> output_dict = model(input_ids, return_dict=True)
1242
+ >>> spectrogram = output_dict["spectrogram"]
1243
+
1244
+ >>> vocoder = FastSpeech2ConformerHifiGan.from_pretrained("espnet/fastspeech2_conformer_hifigan")
1245
+ >>> waveform = vocoder(spectrogram)
1246
+ >>> print(waveform.shape)
1247
+ torch.Size([1, 49664])
1248
+ ```
1249
+ """
1250
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1251
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1252
+ output_hidden_states = (
1253
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1254
+ )
1255
+
1256
+ if attention_mask is None:
1257
+ attention_mask = torch.ones(input_ids.shape, device=input_ids.device)
1258
+
1259
+ has_missing_labels = (
1260
+ spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None
1261
+ )
1262
+ if self.training and has_missing_labels:
1263
+ raise ValueError("All labels must be provided to run in training mode.")
1264
+
1265
+ # forward encoder
1266
+ text_masks = attention_mask.unsqueeze(-2)
1267
+
1268
+ encoder_outputs = self.encoder(
1269
+ input_ids,
1270
+ text_masks,
1271
+ output_hidden_states=output_hidden_states,
1272
+ output_attentions=output_attentions,
1273
+ return_dict=return_dict,
1274
+ )
1275
+ hidden_states = encoder_outputs[0]
1276
+
1277
+ # Integrate with language id, speaker id, and speaker embedding
1278
+ if self.multispeaker_model and speaker_ids is not None:
1279
+ speaker_id_embeddings = self.speaker_id_embedding(speaker_ids.view(-1))
1280
+ hidden_states = hidden_states + speaker_id_embeddings.unsqueeze(1)
1281
+
1282
+ if self.multilingual_model and lang_ids is not None:
1283
+ language_id_embeddings = self.language_id_embedding(lang_ids.view(-1))
1284
+ hidden_states = hidden_states + language_id_embeddings.unsqueeze(1)
1285
+
1286
+ if self.speaker_embed_dim is not None and speaker_embedding is not None:
1287
+ embeddings_expanded = (
1288
+ nn.functional.normalize(speaker_embedding).unsqueeze(1).expand(-1, hidden_states.size(1), -1)
1289
+ )
1290
+ hidden_states = self.projection(torch.cat([hidden_states, embeddings_expanded], dim=-1))
1291
+
1292
+ # forward duration predictor and variance predictors
1293
+ duration_mask = ~attention_mask.bool()
1294
+
1295
+ if self.stop_gradient_from_pitch_predictor:
1296
+ pitch_predictions = self.pitch_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
1297
+ else:
1298
+ pitch_predictions = self.pitch_predictor(hidden_states, duration_mask.unsqueeze(-1))
1299
+
1300
+ if self.stop_gradient_from_energy_predictor:
1301
+ energy_predictions = self.energy_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
1302
+ else:
1303
+ energy_predictions = self.energy_predictor(hidden_states, duration_mask.unsqueeze(-1))
1304
+
1305
+ duration_predictions = self.duration_predictor(hidden_states)
1306
+ duration_predictions = duration_predictions.masked_fill(duration_mask, 0.0)
1307
+
1308
+ if not self.training:
1309
+ # use prediction in inference
1310
+ embedded_pitch_curve = self.pitch_embed(pitch_predictions)
1311
+ embedded_energy_curve = self.energy_embed(energy_predictions)
1312
+ hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
1313
+ hidden_states = length_regulator(hidden_states, duration_predictions, self.config.speaking_speed)
1314
+ else:
1315
+ # use groundtruth in training
1316
+ embedded_pitch_curve = self.pitch_embed(pitch_labels)
1317
+ embedded_energy_curve = self.energy_embed(energy_labels)
1318
+ hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
1319
+ hidden_states = length_regulator(hidden_states, duration_labels)
1320
+
1321
+ # forward decoder
1322
+ if not self.training:
1323
+ hidden_mask = None
1324
+ else:
1325
+ spectrogram_mask = (spectrogram_labels != -100).any(dim=-1)
1326
+ spectrogram_mask = spectrogram_mask.int()
1327
+ if self.reduction_factor > 1:
1328
+ length_dim = spectrogram_mask.shape[1] - spectrogram_mask.shape[1] % self.reduction_factor
1329
+ spectrogram_mask = spectrogram_mask[:, :, :length_dim]
1330
+ hidden_mask = spectrogram_mask.unsqueeze(-2)
1331
+
1332
+ decoder_outputs = self.decoder(
1333
+ hidden_states,
1334
+ hidden_mask,
1335
+ output_hidden_states=output_hidden_states,
1336
+ output_attentions=output_attentions,
1337
+ return_dict=return_dict,
1338
+ )
1339
+
1340
+ outputs_before_postnet, outputs_after_postnet = self.speech_decoder_postnet(decoder_outputs[0])
1341
+
1342
+ loss = None
1343
+ if self.training:
1344
+ # calculate loss
1345
+ loss_duration_mask = ~duration_mask
1346
+ loss_spectrogram_mask = spectrogram_mask.unsqueeze(-1).bool()
1347
+ loss = self.criterion(
1348
+ outputs_after_postnet=outputs_after_postnet,
1349
+ outputs_before_postnet=outputs_before_postnet,
1350
+ duration_outputs=duration_predictions,
1351
+ pitch_outputs=pitch_predictions,
1352
+ energy_outputs=energy_predictions,
1353
+ spectrogram_labels=spectrogram_labels,
1354
+ duration_labels=duration_labels,
1355
+ pitch_labels=pitch_labels,
1356
+ energy_labels=energy_labels,
1357
+ duration_mask=loss_duration_mask,
1358
+ spectrogram_mask=loss_spectrogram_mask,
1359
+ )
1360
+
1361
+ if not return_dict:
1362
+ postnet_outputs = (outputs_after_postnet,)
1363
+ audio_feature_predictions = (
1364
+ duration_predictions,
1365
+ pitch_predictions,
1366
+ energy_predictions,
1367
+ )
1368
+ outputs = postnet_outputs + encoder_outputs + decoder_outputs[1:] + audio_feature_predictions
1369
+ return ((loss,) + outputs) if loss is not None else outputs
1370
+
1371
+ return FastSpeech2ConformerModelOutput(
1372
+ loss=loss,
1373
+ spectrogram=outputs_after_postnet,
1374
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1375
+ encoder_hidden_states=encoder_outputs.hidden_states,
1376
+ encoder_attentions=encoder_outputs.attentions,
1377
+ decoder_hidden_states=decoder_outputs.hidden_states,
1378
+ decoder_attentions=decoder_outputs.attentions,
1379
+ duration_outputs=duration_predictions,
1380
+ pitch_outputs=pitch_predictions,
1381
+ energy_outputs=energy_predictions,
1382
+ )
1383
+
1384
+
1385
+ # Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock
1386
+ class HifiGanResidualBlock(nn.Module):
1387
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
1388
+ super().__init__()
1389
+ self.leaky_relu_slope = leaky_relu_slope
1390
+
1391
+ self.convs1 = nn.ModuleList(
1392
+ [
1393
+ nn.Conv1d(
1394
+ channels,
1395
+ channels,
1396
+ kernel_size,
1397
+ stride=1,
1398
+ dilation=dilation[i],
1399
+ padding=self.get_padding(kernel_size, dilation[i]),
1400
+ )
1401
+ for i in range(len(dilation))
1402
+ ]
1403
+ )
1404
+ self.convs2 = nn.ModuleList(
1405
+ [
1406
+ nn.Conv1d(
1407
+ channels,
1408
+ channels,
1409
+ kernel_size,
1410
+ stride=1,
1411
+ dilation=1,
1412
+ padding=self.get_padding(kernel_size, 1),
1413
+ )
1414
+ for _ in range(len(dilation))
1415
+ ]
1416
+ )
1417
+
1418
+ def get_padding(self, kernel_size, dilation=1):
1419
+ return (kernel_size * dilation - dilation) // 2
1420
+
1421
+ def apply_weight_norm(self):
1422
+ for layer in self.convs1:
1423
+ nn.utils.weight_norm(layer)
1424
+ for layer in self.convs2:
1425
+ nn.utils.weight_norm(layer)
1426
+
1427
+ def remove_weight_norm(self):
1428
+ for layer in self.convs1:
1429
+ nn.utils.remove_weight_norm(layer)
1430
+ for layer in self.convs2:
1431
+ nn.utils.remove_weight_norm(layer)
1432
+
1433
+ def forward(self, hidden_states):
1434
+ for conv1, conv2 in zip(self.convs1, self.convs2):
1435
+ residual = hidden_states
1436
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
1437
+ hidden_states = conv1(hidden_states)
1438
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
1439
+ hidden_states = conv2(hidden_states)
1440
+ hidden_states = hidden_states + residual
1441
+ return hidden_states
1442
+
1443
+
1444
+ @add_start_docstrings(
1445
+ """HiFi-GAN vocoder.""",
1446
+ HIFIGAN_START_DOCSTRING,
1447
+ )
1448
+ # Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5HifiGan with SpeechT5->FastSpeech2Conformer
1449
+ class FastSpeech2ConformerHifiGan(PreTrainedModel):
1450
+ config_class = FastSpeech2ConformerHifiGanConfig
1451
+ main_input_name = "spectrogram"
1452
+
1453
+ def __init__(self, config: FastSpeech2ConformerHifiGanConfig):
1454
+ super().__init__(config)
1455
+ self.num_kernels = len(config.resblock_kernel_sizes)
1456
+ self.num_upsamples = len(config.upsample_rates)
1457
+ self.conv_pre = nn.Conv1d(
1458
+ config.model_in_dim,
1459
+ config.upsample_initial_channel,
1460
+ kernel_size=7,
1461
+ stride=1,
1462
+ padding=3,
1463
+ )
1464
+
1465
+ self.upsampler = nn.ModuleList()
1466
+ for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
1467
+ self.upsampler.append(
1468
+ nn.ConvTranspose1d(
1469
+ config.upsample_initial_channel // (2**i),
1470
+ config.upsample_initial_channel // (2 ** (i + 1)),
1471
+ kernel_size=kernel_size,
1472
+ stride=upsample_rate,
1473
+ padding=(kernel_size - upsample_rate) // 2,
1474
+ )
1475
+ )
1476
+
1477
+ self.resblocks = nn.ModuleList()
1478
+ for i in range(len(self.upsampler)):
1479
+ channels = config.upsample_initial_channel // (2 ** (i + 1))
1480
+ for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
1481
+ self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
1482
+
1483
+ self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
1484
+
1485
+ self.register_buffer("mean", torch.zeros(config.model_in_dim))
1486
+ self.register_buffer("scale", torch.ones(config.model_in_dim))
1487
+
1488
+ # Initialize weights and apply final processing
1489
+ self.post_init()
1490
+
1491
+ def _init_weights(self, module):
1492
+ """Initialize the weights."""
1493
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
1494
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
1495
+ if module.bias is not None:
1496
+ module.bias.data.zero_()
1497
+
1498
+ def apply_weight_norm(self):
1499
+ nn.utils.weight_norm(self.conv_pre)
1500
+ for layer in self.upsampler:
1501
+ nn.utils.weight_norm(layer)
1502
+ for layer in self.resblocks:
1503
+ layer.apply_weight_norm()
1504
+ nn.utils.weight_norm(self.conv_post)
1505
+
1506
+ def remove_weight_norm(self):
1507
+ nn.utils.remove_weight_norm(self.conv_pre)
1508
+ for layer in self.upsampler:
1509
+ nn.utils.remove_weight_norm(layer)
1510
+ for layer in self.resblocks:
1511
+ layer.remove_weight_norm()
1512
+ nn.utils.remove_weight_norm(self.conv_post)
1513
+
1514
+ def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
1515
+ r"""
1516
+ Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
1517
+ of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
1518
+ waveform.
1519
+
1520
+ Args:
1521
+ spectrogram (`torch.FloatTensor`):
1522
+ Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
1523
+ config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
1524
+
1525
+ Returns:
1526
+ `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
1527
+ shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
1528
+ """
1529
+ if self.config.normalize_before:
1530
+ spectrogram = (spectrogram - self.mean) / self.scale
1531
+
1532
+ is_batched = spectrogram.dim() == 3
1533
+ if not is_batched:
1534
+ spectrogram = spectrogram.unsqueeze(0)
1535
+
1536
+ hidden_states = spectrogram.transpose(2, 1)
1537
+
1538
+ hidden_states = self.conv_pre(hidden_states)
1539
+ for i in range(self.num_upsamples):
1540
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
1541
+ hidden_states = self.upsampler[i](hidden_states)
1542
+
1543
+ res_state = self.resblocks[i * self.num_kernels](hidden_states)
1544
+ for j in range(1, self.num_kernels):
1545
+ res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
1546
+ hidden_states = res_state / self.num_kernels
1547
+
1548
+ hidden_states = nn.functional.leaky_relu(hidden_states)
1549
+ hidden_states = self.conv_post(hidden_states)
1550
+ hidden_states = torch.tanh(hidden_states)
1551
+
1552
+ if not is_batched:
1553
+ # remove batch dim and collapse tensor to 1-d audio waveform
1554
+ waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
1555
+ else:
1556
+ # remove seq-len dim since this collapses to 1
1557
+ waveform = hidden_states.squeeze(1)
1558
+
1559
+ return waveform
1560
+
1561
+
1562
+ @add_start_docstrings(
1563
+ "The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).",
1564
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING,
1565
+ )
1566
+ class FastSpeech2ConformerWithHifiGan(PreTrainedModel):
1567
+ config_class = FastSpeech2ConformerWithHifiGanConfig
1568
+
1569
+ def __init__(self, config: FastSpeech2ConformerWithHifiGanConfig):
1570
+ super().__init__(config)
1571
+
1572
+ self.model = FastSpeech2ConformerModel(config.model_config)
1573
+ self.vocoder = FastSpeech2ConformerHifiGan(config.vocoder_config)
1574
+
1575
+ self.config = config
1576
+
1577
+ @replace_return_docstrings(
1578
+ output_type=FastSpeech2ConformerWithHifiGanOutput, config_class=FastSpeech2ConformerWithHifiGanConfig
1579
+ )
1580
+ def forward(
1581
+ self,
1582
+ input_ids: torch.LongTensor,
1583
+ attention_mask: Optional[torch.LongTensor] = None,
1584
+ spectrogram_labels: Optional[torch.FloatTensor] = None,
1585
+ duration_labels: Optional[torch.LongTensor] = None,
1586
+ pitch_labels: Optional[torch.FloatTensor] = None,
1587
+ energy_labels: Optional[torch.FloatTensor] = None,
1588
+ speaker_ids: Optional[torch.LongTensor] = None,
1589
+ lang_ids: Optional[torch.LongTensor] = None,
1590
+ speaker_embedding: Optional[torch.FloatTensor] = None,
1591
+ return_dict: Optional[bool] = None,
1592
+ output_attentions: Optional[bool] = None,
1593
+ output_hidden_states: Optional[bool] = None,
1594
+ ) -> Union[Tuple, FastSpeech2ConformerModelOutput]:
1595
+ """
1596
+ Args:
1597
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1598
+ Input sequence of text vectors.
1599
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`):
1600
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
1601
+ `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**.
1602
+ spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
1603
+ Batch of padded target features.
1604
+ duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
1605
+ Batch of padded durations.
1606
+ pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
1607
+ Batch of padded token-averaged pitch.
1608
+ energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
1609
+ Batch of padded token-averaged energy.
1610
+ speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
1611
+ Speaker ids used to condition features of speech output by the model.
1612
+ lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
1613
+ Language ids used to condition features of speech output by the model.
1614
+ speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
1615
+ Embedding containing conditioning signals for the features of the speech.
1616
+ return_dict (`bool`, *optional*, defaults to `None`):
1617
+ Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple.
1618
+ output_attentions (`bool`, *optional*, defaults to `None`):
1619
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1620
+ returned tensors for more detail.
1621
+ output_hidden_states (`bool`, *optional*, defaults to `None`):
1622
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1623
+ for more detail.
1624
+
1625
+ Returns:
1626
+
1627
+ Example:
1628
+
1629
+ ```python
1630
+ >>> from transformers import (
1631
+ ... FastSpeech2ConformerTokenizer,
1632
+ ... FastSpeech2ConformerWithHifiGan,
1633
+ ... )
1634
+
1635
+ >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
1636
+ >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
1637
+ >>> input_ids = inputs["input_ids"]
1638
+
1639
+ >>> model = FastSpeech2ConformerWithHifiGan.from_pretrained("espnet/fastspeech2_conformer_with_hifigan")
1640
+ >>> output_dict = model(input_ids, return_dict=True)
1641
+ >>> waveform = output_dict["waveform"]
1642
+ >>> print(waveform.shape)
1643
+ torch.Size([1, 49664])
1644
+ ```
1645
+ """
1646
+ return_dict = return_dict if return_dict is not None else self.config.model_config.use_return_dict
1647
+ output_attentions = (
1648
+ output_attentions if output_attentions is not None else self.config.model_config.output_attentions
1649
+ )
1650
+ output_hidden_states = (
1651
+ output_hidden_states if output_hidden_states is not None else self.config.model_config.output_hidden_states
1652
+ )
1653
+
1654
+ model_outputs = self.model(
1655
+ input_ids,
1656
+ attention_mask,
1657
+ spectrogram_labels=spectrogram_labels,
1658
+ duration_labels=duration_labels,
1659
+ pitch_labels=pitch_labels,
1660
+ energy_labels=energy_labels,
1661
+ speaker_ids=speaker_ids,
1662
+ lang_ids=lang_ids,
1663
+ speaker_embedding=speaker_embedding,
1664
+ return_dict=return_dict,
1665
+ output_attentions=output_attentions,
1666
+ output_hidden_states=output_hidden_states,
1667
+ )
1668
+
1669
+ if not return_dict:
1670
+ has_missing_labels = (
1671
+ spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None
1672
+ )
1673
+ if has_missing_labels:
1674
+ spectrogram = model_outputs[0]
1675
+ else:
1676
+ spectrogram = model_outputs[1]
1677
+ else:
1678
+ spectrogram = model_outputs["spectrogram"]
1679
+ waveform = self.vocoder(spectrogram)
1680
+
1681
+ if not return_dict:
1682
+ return model_outputs + (waveform,)
1683
+
1684
+ return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs)
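A quick back-of-the-envelope check on the vocoder above: each `ConvTranspose1d` in `FastSpeech2ConformerHifiGan` upsamples the time axis by its stride, so the waveform length is roughly the number of decoder frames multiplied by the product of `config.upsample_rates`. The sketch below uses assumed rates and an assumed frame count purely for illustration; the real checkpoint defines its own values.

```python
# Sketch only: the upsample rates and frame count below are assumed, not read from a checkpoint.
import math

upsample_rates = [8, 8, 2, 2]  # hypothetical config.upsample_rates
num_frames = 194               # hypothetical number of spectrogram frames from the acoustic model

total_upsampling = math.prod(upsample_rates)  # 256 for these assumed rates
print(num_frames * total_upsampling)          # 49664 samples, which would match the docstring
                                              # example above if the checkpoint's rates also
                                              # multiply to 256
```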
llmeval-env/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py ADDED
@@ -0,0 +1,184 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for FastSpeech2Conformer."""
16
+ import json
17
+ import os
18
+ from typing import Optional, Tuple
19
+
20
+ import regex
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer
23
+ from ...utils import logging, requires_backends
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
29
+
30
+
31
+ class FastSpeech2ConformerTokenizer(PreTrainedTokenizer):
32
+ """
33
+ Construct a FastSpeech2Conformer tokenizer.
34
+
35
+ Args:
36
+ vocab_file (`str`):
37
+ Path to the vocabulary file.
38
+ bos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
39
+ The beginning of sequence token. Note that for FastSpeech2, it is the same as the `eos_token`.
40
+ eos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
41
+ The end of sequence token. Note that for FastSpeech2, it is the same as the `bos_token`.
42
+ pad_token (`str`, *optional*, defaults to `"<blank>"`):
43
+ The token used for padding, for example when batching sequences of different lengths.
44
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
45
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
46
+ token instead.
47
+ should_strip_spaces (`bool`, *optional*, defaults to `False`):
48
+ Whether or not to strip the spaces from the list of tokens.
49
+ """
50
+
51
+ vocab_files_names = VOCAB_FILES_NAMES
52
+ model_input_names = ["input_ids", "attention_mask"]
53
+
54
+ def __init__(
55
+ self,
56
+ vocab_file,
57
+ bos_token="<sos/eos>",
58
+ eos_token="<sos/eos>",
59
+ pad_token="<blank>",
60
+ unk_token="<unk>",
61
+ should_strip_spaces=False,
62
+ **kwargs,
63
+ ):
64
+ requires_backends(self, "g2p_en")
65
+
66
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
67
+ self.encoder = json.load(vocab_handle)
68
+
69
+ import g2p_en
70
+
71
+ self.g2p = g2p_en.G2p()
72
+
73
+ self.decoder = {v: k for k, v in self.encoder.items()}
74
+
75
+ super().__init__(
76
+ bos_token=bos_token,
77
+ eos_token=eos_token,
78
+ unk_token=unk_token,
79
+ pad_token=pad_token,
80
+ should_strip_spaces=should_strip_spaces,
81
+ **kwargs,
82
+ )
83
+
84
+ self.should_strip_spaces = should_strip_spaces
85
+
86
+ @property
87
+ def vocab_size(self):
88
+ return len(self.decoder)
89
+
90
+ def get_vocab(self):
91
+ "Returns vocab as a dict"
92
+ return dict(self.encoder, **self.added_tokens_encoder)
93
+
94
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
95
+ # expand symbols
96
+ text = regex.sub(";", ",", text)
97
+ text = regex.sub(":", ",", text)
98
+ text = regex.sub("-", " ", text)
99
+ text = regex.sub("&", "and", text)
100
+
101
+ # strip unnecessary symbols
102
+ text = regex.sub(r"[\(\)\[\]\<\>\"]+", "", text)
103
+
104
+ # strip whitespaces
105
+ text = regex.sub(r"\s+", " ", text)
106
+
107
+ text = text.upper()
108
+
109
+ return text, kwargs
110
+
111
+ def _tokenize(self, text):
112
+ """Returns a tokenized string."""
113
+ # phonemize
114
+ tokens = self.g2p(text)
115
+
116
+ if self.should_strip_spaces:
117
+ tokens = list(filter(lambda s: s != " ", tokens))
118
+
119
+ tokens.append(self.eos_token)
120
+
121
+ return tokens
122
+
123
+ def _convert_token_to_id(self, token):
124
+ """Converts a token (str) in an id using the vocab."""
125
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
126
+
127
+ def _convert_id_to_token(self, index):
128
+ """Converts an index (integer) in a token (str) using the vocab."""
129
+ return self.decoder.get(index, self.unk_token)
130
+
131
+ # Override since phonemes cannot be converted back to strings
132
+ def decode(self, token_ids, **kwargs):
133
+ logger.warning(
134
+ "Phonemes cannot be reliably converted to a string due to the one-many mapping, converting to tokens instead."
135
+ )
136
+ return self.convert_ids_to_tokens(token_ids)
137
+
138
+ # Override since phonemes cannot be converted back to strings
139
+ def convert_tokens_to_string(self, tokens, **kwargs):
140
+ logger.warning(
141
+ "Phonemes cannot be reliably converted to a string due to the one-many mapping, returning the tokens."
142
+ )
143
+ return tokens
144
+
145
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
146
+ """
147
+ Save the vocabulary and special tokens file to a directory.
148
+
149
+ Args:
150
+ save_directory (`str`):
151
+ The directory in which to save the vocabulary.
152
+
153
+ Returns:
154
+ `Tuple(str)`: Paths to the files saved.
155
+ """
156
+ if not os.path.isdir(save_directory):
157
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
158
+ return
159
+ vocab_file = os.path.join(
160
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
161
+ )
162
+
163
+ with open(vocab_file, "w", encoding="utf-8") as f:
164
+ f.write(json.dumps(self.get_vocab(), ensure_ascii=False))
165
+
166
+ return (vocab_file,)
167
+
168
+ def __getstate__(self):
169
+ state = self.__dict__.copy()
170
+ state["g2p"] = None
171
+ return state
172
+
173
+ def __setstate__(self, d):
174
+ self.__dict__ = d
175
+
176
+ try:
177
+ import g2p_en
178
+
179
+ self.g2p = g2p_en.G2p()
180
+ except ImportError:
181
+ raise ImportError(
182
+ "You need to install g2p-en to use FastSpeech2ConformerTokenizer. "
183
+ "See https://pypi.org/project/g2p-en/ for installation."
184
+ )
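The text normalization done in `prepare_for_tokenization` above can be exercised without a vocabulary file or `g2p_en`; the sketch below simply replays those regex substitutions on a made-up sample string (the third-party `regex` package is used to mirror the tokenizer, though `re` behaves the same for these patterns).

```python
# Sketch of the normalization in FastSpeech2ConformerTokenizer.prepare_for_tokenization above.
import regex


def normalize(text):
    text = regex.sub(";", ",", text)
    text = regex.sub(":", ",", text)
    text = regex.sub("-", " ", text)
    text = regex.sub("&", "and", text)
    text = regex.sub(r"[\(\)\[\]\<\>\"]+", "", text)  # strip brackets and quotes
    text = regex.sub(r"\s+", " ", text)               # collapse runs of whitespace
    return text.upper()


print(normalize('Hello - world & "friends":'))  # HELLO WORLD AND FRIENDS,
```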
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2023 AdeptAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"],
21
+ }
22
+
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["image_processing_fuyu"] = ["FuyuImageProcessor"]
31
+ _import_structure["processing_fuyu"] = ["FuyuProcessor"]
32
+
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ _import_structure["modeling_fuyu"] = [
41
+ "FuyuForCausalLM",
42
+ "FuyuPreTrainedModel",
43
+ ]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig
48
+
49
+ try:
50
+ if not is_vision_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .image_processing_fuyu import FuyuImageProcessor
56
+ from .processing_fuyu import FuyuProcessor
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_fuyu import (
65
+ FuyuForCausalLM,
66
+ FuyuPreTrainedModel,
67
+ )
68
+
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
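The `__init__.py` above defers the heavy imports until an attribute is first accessed. The snippet below is a simplified, self-contained illustration of that lazy-module idea only; it is not transformers' `_LazyModule` implementation, which additionally handles `TYPE_CHECKING`, module specs, and error reporting.

```python
# Simplified sketch of a lazy module: the backing module is imported on first attribute access.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._attr_to_module = {
            attr: module_name for module_name, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Called only when normal lookup fails, i.e. the first time the attribute is requested.
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)


lazy = LazyModule("demo", {"json": ["JSONDecoder"]})
print(lazy.JSONDecoder)  # the stdlib json module is imported only at this point
```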
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/configuration_fuyu.cpython-310.pyc ADDED
Binary file (7.94 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/convert_fuyu_model_weights_to_hf.cpython-310.pyc ADDED
Binary file (2.96 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/image_processing_fuyu.cpython-310.pyc ADDED
Binary file (25.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc ADDED
Binary file (14 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc ADDED
Binary file (22 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py ADDED
@@ -0,0 +1,211 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Adept AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Fuyu model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ..auto import CONFIG_MAPPING
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class FuyuConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate a
31
+ Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the
33
+ [adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b).
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 262144):
41
+ Vocabulary size of the Fuyu model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`FuyuForCausalLM`]
43
+ hidden_size (`int`, *optional*, defaults to 4096):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 16384):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 36):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 64):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
52
+ The non-linear activation function (function or string) in the decoder.
53
+ max_position_embeddings (`int`, *optional*, defaults to 16384):
54
+ The maximum sequence length that this model might ever be used with.
55
+ image_size (`int`, *optional*, defaults to 300):
56
+ The input image size.
57
+ patch_size (`int`, *optional*, defaults to 30):
58
+ The input vision transformer encoding patch size.
59
+ num_channels (`int`, *optional*, defaults to 3):
60
+ The input image number of channels.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
69
+ Whether to tie input and output embeddings.
70
+ rope_theta (`float`, *optional*, defaults to 25000.0):
71
+ The base period of the RoPE embeddings.
72
+ rope_scaling (`Dict`, *optional*):
73
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
74
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
75
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
76
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
77
+ these scaling strategies behave:
78
+ https://www.reddit.com/r/LocalFuyu/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
79
+ experimental feature, subject to breaking API changes in future versions.
80
+ qk_layernorm (`bool`, *optional*, defaults to `True`):
81
+ Whether or not to normalize the Queries and Keys after projecting the hidden states
82
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
83
+ The dropout ratio after applying the MLP to the hidden states.
84
+ attention_dropout (`float`, *optional*, defaults to 0.0):
85
+ The dropout ratio after computing the attention scores.
86
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
87
+ Percentage of the query and keys which will have rotary embedding.
88
+
89
+ pad_token_id (`int`, *optional*):
90
+ The id of the *padding* token.
91
+ bos_token_id (`int`, *optional*, defaults to 1):
92
+ The id of the *beginning-of-sequence* token.
93
+ eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
94
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
95
+ text_config (`dict`, *optional*):
96
+ Dictionary of configuration options used to initialize the language model.
97
+
98
+ ```python
99
+ >>> from transformers import FuyuConfig
100
+
101
+ >>> # Initializing a Fuyu fuyu-7b style configuration
102
+ >>> configuration = FuyuConfig()
103
+ ```"""
104
+
105
+ model_type = "fuyu"
106
+ keys_to_ignore_at_inference = ["past_key_values"]
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=262144,
111
+ hidden_size=4096,
112
+ intermediate_size=16384,
113
+ num_hidden_layers=36,
114
+ num_attention_heads=64,
115
+ hidden_act="relu2",
116
+ max_position_embeddings=16384,
117
+ image_size=300,
118
+ patch_size=30,
119
+ num_channels=3,
120
+ initializer_range=0.02,
121
+ layer_norm_eps=1e-5,
122
+ use_cache=True,
123
+ tie_word_embeddings=False,
124
+ rope_theta=25000.0,
125
+ rope_scaling=None,
126
+ qk_layernorm=True,
127
+ hidden_dropout=0.0,
128
+ attention_dropout=0.0,
129
+ partial_rotary_factor=0.5,
130
+ pad_token_id=None,
131
+ bos_token_id=1,
132
+ eos_token_id=2,
133
+ text_config=None,
134
+ **kwargs,
135
+ ):
136
+ if text_config is None:
137
+ text_config = {
138
+ "vocab_size": vocab_size,
139
+ "max_position_embeddings": max_position_embeddings,
140
+ "hidden_size": hidden_size,
141
+ "intermediate_size": intermediate_size,
142
+ "num_hidden_layers": num_hidden_layers,
143
+ "num_attention_heads": num_attention_heads,
144
+ "hidden_act": hidden_act,
145
+ "initializer_range": initializer_range,
146
+ "layer_norm_eps": layer_norm_eps,
147
+ "use_cache": use_cache,
148
+ "rope_theta": rope_theta,
149
+ "rope_scaling": rope_scaling,
150
+ "qk_layernorm": qk_layernorm,
151
+ "hidden_dropout": hidden_dropout,
152
+ "attention_dropout": attention_dropout,
153
+ "partial_rotary_factor": partial_rotary_factor,
154
+ "pad_token_id": pad_token_id,
155
+ "bos_token_id": bos_token_id,
156
+ "eos_token_id": eos_token_id,
157
+ "tie_word_embeddings": tie_word_embeddings,
158
+ }
159
+ logger.info("text_config is None. initializing the text model with default values.")
160
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "persimmon"
161
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
162
+
163
+ self.vocab_size = vocab_size
164
+ self.max_position_embeddings = max_position_embeddings
165
+ self.image_size = image_size
166
+ self.patch_size = patch_size
167
+ self.num_channels = num_channels
168
+ self.hidden_size = hidden_size
169
+ self.intermediate_size = intermediate_size
170
+ self.num_hidden_layers = num_hidden_layers
171
+ self.num_attention_heads = num_attention_heads
172
+ self.hidden_act = hidden_act
173
+ self.initializer_range = initializer_range
174
+ self.layer_norm_eps = layer_norm_eps
175
+ self.use_cache = use_cache
176
+ self.rope_theta = rope_theta
177
+ self.rope_scaling = rope_scaling
178
+ self.qk_layernorm = qk_layernorm
179
+ self.hidden_dropout = hidden_dropout
180
+ self.attention_dropout = attention_dropout
181
+ self.partial_rotary_factor = partial_rotary_factor
182
+ self._rope_scaling_validation()
183
+
184
+ super().__init__(
185
+ pad_token_id=pad_token_id,
186
+ bos_token_id=bos_token_id,
187
+ eos_token_id=eos_token_id,
188
+ tie_word_embeddings=tie_word_embeddings,
189
+ **kwargs,
190
+ )
191
+
192
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
193
+ def _rope_scaling_validation(self):
194
+ """
195
+ Validate the `rope_scaling` configuration.
196
+ """
197
+ if self.rope_scaling is None:
198
+ return
199
+
200
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
201
+ raise ValueError(
202
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
203
+ )
204
+ rope_scaling_type = self.rope_scaling.get("type", None)
205
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
206
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
207
+ raise ValueError(
208
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
209
+ )
210
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
211
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
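The `rope_scaling` check above is easiest to see in isolation. The sketch below extracts the same validation into a plain function (the function name is made up for illustration) and shows which inputs pass and which raise.

```python
# Sketch of the rope_scaling validation performed by FuyuConfig above, as a standalone function.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return  # scaling disabled
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}")
    rope_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if rope_type not in ["linear", "dynamic"]:
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")


validate_rope_scaling(None)                               # accepted: scaling disabled
validate_rope_scaling({"type": "linear", "factor": 2.0})  # accepted
# validate_rope_scaling({"type": "linear", "factor": 2})  # would raise: factor must be a float > 1
```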
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import os
16
+ import sys
17
+ import warnings
18
+
19
+ import flatdict
20
+ import torch
21
+
22
+ from transformers import FuyuConfig, FuyuForCausalLM, LlamaTokenizer
23
+
24
+
25
+ try:
26
+ from transformers import LlamaTokenizerFast
27
+
28
+ tokenizer_class = LlamaTokenizerFast
29
+ except ImportError as e:
30
+ warnings.warn(e)
31
+ warnings.warn(
32
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
33
+ )
34
+ tokenizer_class = LlamaTokenizer
35
+
36
+ """
37
+ Sample usage: # TODO fix clone links from persimmon to fuyu
38
+ ```
39
+ git clone https://github.com/adept-ai-labs/adept-inference
40
+ wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_base_model_release.tar
41
+ wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar
42
+ python src/transformers/models/fuyu/convert_fuyu_weights_to_hf.py --input_dir /path/to/downloaded/fuyu/weights/ --output_dir /output/path
43
+ ```
44
+
45
+ Thereafter, models can be loaded via:
46
+
47
+ ```py
48
+ from transformers import FuyuForCausalLM, FuyuTokenizer
49
+
50
+ model = FuyuForCausalLM.from_pretrained("/output/path")
51
+ tokenizer = FuyuTokenizer.from_pretrained("/output/path")
52
+ ```
53
+
54
+ Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
55
+ come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
56
+ """
57
+
58
+
59
+ KEYS_TO_MODIFY_MAPPING = {
60
+ "self_attention": "self_attn",
61
+ "language_model.encoder": "language_model.model",
62
+ "word_embeddings_for_head": "language_model.lm_head",
63
+ "language_model.embedding.word_embeddings": "language_model.model.embed_tokens",
64
+ "vit_encoder.linear_encoder": "vision_embed_tokens",
65
+ }
66
+
67
+ KEYS_TO_REMOVE = {
68
+ "rotary_emb.inv_freq",
69
+ "image_patch_projection",
70
+ "image_patch_projection.weight",
71
+ "image_patch_projection.bias",
72
+ }
73
+
74
+
75
+ def rename_state_dict(state_dict):
76
+ model_state_dict = {}
77
+ for key, value in state_dict.items():
78
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
79
+ if key_to_modify in key:
80
+ key = key.replace(key_to_modify, new_key)
81
+ # if KEYS_TO_REMOVE in key:
82
+ if key in KEYS_TO_REMOVE:
83
+ continue
84
+ model_state_dict[key] = value
85
+ return model_state_dict
86
+
87
+
88
+ def convert_fuyu_checkpoint(pytorch_dump_folder_path, ada_lib_path, pt_model_path, safe_serialization=False):
89
+ sys.path.insert(0, ada_lib_path)
90
+ model_state_dict_base = torch.load(pt_model_path, map_location="cpu")
91
+ state_dict = flatdict.FlatDict(model_state_dict_base["model"], ".")
92
+ state_dict = rename_state_dict(state_dict)
93
+
94
+ transformers_config = FuyuConfig()
95
+ model = FuyuForCausalLM(transformers_config).to(torch.bfloat16)
96
+ model.load_state_dict(state_dict)
97
+ model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization)
98
+ transformers_config.save_pretrained(pytorch_dump_folder_path)
99
+
100
+
101
+ def main():
102
+ parser = argparse.ArgumentParser()
103
+ parser.add_argument(
104
+ "--input_dir",
105
+ help="Location of Fuyu weights, which contains tokenizer.model and model folders",
106
+ )
107
+ parser.add_argument(
108
+ "--pt_model_path",
109
+ help="Location of Fuyu `model_optim_rng.pt`",
110
+ )
111
+ parser.add_argument(
112
+ "--output_dir",
113
+ help="Location to write HF model and tokenizer",
114
+ )
115
+ parser.add_argument(
116
+ "--ada_lib_path",
117
+ help="Location of original source code from adept to deserialize .pt checkpoint",
118
+ )
119
+ parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
120
+ args = parser.parse_args()
121
+ spm_path = os.path.join(args.input_dir, "adept_vocab.model")
122
+
123
+ convert_fuyu_checkpoint(
124
+ pytorch_dump_folder_path=args.output_dir,
125
+ pt_model_path=args.pt_model_path,
126
+ safe_serialization=args.safe_serialization,
127
+ ada_lib_path=args.ada_lib_path,
128
+ )
129
+ tokenizer = tokenizer_class(spm_path, bos_token="|ENDOFTEXT|", eos_token="|ENDOFTEXT|")
130
+ tokenizer.save_pretrained(args.output_dir)
131
+
132
+
133
+ if __name__ == "__main__":
134
+ main()
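The key handling in `rename_state_dict` above is just substring replacement plus a removal set. The toy example below (made-up keys, trivial values) shows the effect without loading any real checkpoint.

```python
# Toy illustration of the renaming/removal logic in rename_state_dict above.
KEYS_TO_MODIFY_MAPPING = {
    "self_attention": "self_attn",
    "language_model.encoder": "language_model.model",
}
KEYS_TO_REMOVE = {"rotary_emb.inv_freq"}

toy_state_dict = {
    "language_model.encoder.layers.0.self_attention.q_proj.weight": 1,
    "rotary_emb.inv_freq": 2,
}

renamed = {}
for key, value in toy_state_dict.items():
    for old, new in KEYS_TO_MODIFY_MAPPING.items():
        if old in key:
            key = key.replace(old, new)
    if key in KEYS_TO_REMOVE:
        continue  # dropped, just as the converter skips these keys
    renamed[key] = value

print(renamed)  # {'language_model.model.layers.0.self_attn.q_proj.weight': 1}
```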
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/image_processing_fuyu.py ADDED
@@ -0,0 +1,736 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Fuyu."""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
23
+ from ...image_transforms import (
24
+ pad,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ ChannelDimension,
30
+ ImageInput,
31
+ PILImageResampling,
32
+ get_image_size,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ is_valid_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ validate_preprocess_arguments,
39
+ )
40
+ from ...utils import (
41
+ TensorType,
42
+ is_torch_available,
43
+ is_torch_device,
44
+ is_torch_dtype,
45
+ logging,
46
+ requires_backends,
47
+ )
48
+
49
+
50
+ if is_torch_available():
51
+ import torch
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+
57
+ def make_list_of_list_of_images(
58
+ images: Union[List[List[ImageInput]], List[ImageInput], ImageInput],
59
+ ) -> List[List[ImageInput]]:
60
+ if is_valid_image(images):
61
+ return [[images]]
62
+
63
+ if isinstance(images, list) and all(isinstance(image, list) for image in images):
64
+ return images
65
+
66
+ if isinstance(images, list):
67
+ return [make_list_of_images(image) for image in images]
68
+
69
+ raise ValueError("images must be a list of list of images or a list of images or an image.")
70
+
71
+
72
+ class FuyuBatchFeature(BatchFeature):
73
+ """
74
+ BatchFeature class for Fuyu image processor and processor.
75
+
76
+ The outputs dictionary from the processors contains a mix of tensors and lists of tensors.
77
+ """
78
+
79
+ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
80
+ """
81
+ Convert the inner content to tensors.
82
+
83
+ Args:
84
+ tensor_type (`str` or [`~utils.TensorType`], *optional*):
85
+ The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
86
+ `None`, no modification is done.
87
+ """
88
+ if tensor_type is None:
89
+ return self
90
+
91
+ is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type)
92
+
93
+ def _convert_tensor(elem):
94
+ if is_tensor(elem):
95
+ return elem
96
+ return as_tensor(elem)
97
+
98
+ def _safe_convert_tensor(elem):
99
+ try:
100
+ return _convert_tensor(elem)
101
+ except: # noqa E722
102
+ if key == "overflowing_values":
103
+ raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
104
+ raise ValueError(
105
+ "Unable to create tensor, you should probably activate padding "
106
+ "with 'padding=True' to have batched tensors with the same length."
107
+ )
108
+
109
+ # Do the tensor conversion in batch
110
+ for key, value in self.items():
111
+ if isinstance(value, list) and isinstance(value[0], list):
112
+ # List[List[Any]] -> List[List[Tensor]]
113
+ self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value]
114
+ elif isinstance(value, list):
115
+ # List[Any] -> List[Tensor]
116
+ self[key] = [_safe_convert_tensor(elem) for elem in value]
117
+ else:
118
+ # Any -> Tensor
119
+ self[key] = _safe_convert_tensor(value)
120
+ return self
121
+
122
+ def to(self, *args, **kwargs) -> "BatchFeature":
123
+ """
124
+ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
125
+ different `dtypes` and sending the `BatchFeature` to a different `device`.
126
+
127
+ Args:
128
+ args (`Tuple`):
129
+ Will be passed to the `to(...)` function of the tensors.
130
+ kwargs (`Dict`, *optional*):
131
+ Will be passed to the `to(...)` function of the tensors.
132
+
133
+ Returns:
134
+ [`BatchFeature`]: The same instance after modification.
135
+ """
136
+ requires_backends(self, ["torch"])
137
+ import torch # noqa
138
+
139
+ new_data = {}
140
+ device = kwargs.get("device")
141
+ # Check if the args are a device or a dtype
142
+ if device is None and len(args) > 0:
143
+ # device should be always the first argument
144
+ arg = args[0]
145
+ if is_torch_dtype(arg):
146
+ # The first argument is a dtype
147
+ pass
148
+ elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
149
+ device = arg
150
+ else:
151
+ # it's something else
152
+ raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
153
+
154
+ def _to(elem):
155
+ # check if v is a floating point
156
+ if torch.is_floating_point(elem):
157
+ # cast and send to device
158
+ return elem.to(*args, **kwargs)
159
+ if device is not None:
160
+ return elem.to(device=device)
161
+
162
+ return elem
163
+
164
+ # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
165
+ for k, v in self.items():
166
+ if isinstance(v, list) and isinstance(v[0], list):
167
+ # Data structure is a list of lists
168
+ new_v = []
169
+ for elems in v:
170
+ new_v.append([_to(elem) for elem in elems])
171
+ new_data[k] = new_v
172
+ elif isinstance(v, list):
173
+ # Data structure is a list
174
+ new_data[k] = [_to(elem) for elem in v]
175
+ else:
176
+ new_data[k] = _to(v)
177
+ self.data = new_data
178
+ return self
179
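A minimal usage sketch of the class above, assuming the import path shown in this diff and a local PyTorch install. It shows that nested lists are converted element-wise and that `to` only casts floating-point tensors:

```python
import torch

from transformers.models.fuyu.image_processing_fuyu import FuyuBatchFeature

features = FuyuBatchFeature(
    data={
        "image_patches": [[torch.randn(4, 2700)]],       # List[List[...]] handled element-wise
        "image_input_ids": [torch.tensor([71011] * 4)],  # List[...] handled element-wise
    }
)
features = features.convert_to_tensors("pt")  # existing tensors are kept, list structure is preserved
features = features.to(torch.float16)         # float tensors are cast; the integer ids stay int64
```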
+
180
+
181
+ class FuyuImageProcessor(BaseImageProcessor):
182
+ """
183
+ This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should
184
+ handle:
185
+
186
+ - Processing Images:
187
+ Taking a batch of images as input. If the images are variable-sized, it resizes them to fit within the target size
188
+ while preserving the aspect ratio. The output image size is always (img_h, img_w) = (1080, 1920).
189
+
190
+ Then, it patches up these images using the patchify_image function.
191
+
192
+ - Creating Image Input IDs:
193
+ For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For
194
+ variable-sized images, each line of patches is terminated with a newline ID.
195
+
196
+ - Image Patch Indices:
197
+ For each image patch, the code maintains an index where these patches should be inserted in a token stream.
198
+
199
+
200
+ Args:
201
+ do_resize (`bool`, *optional*, defaults to `True`):
202
+ Whether to resize the image to `size`.
203
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`):
204
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
205
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
206
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
207
+ do_pad (`bool`, *optional*, defaults to `True`):
208
+ Whether to pad the image to `size`.
209
+ padding_value (`float`, *optional*, defaults to 1.0):
210
+ The value to pad the image with.
211
+ padding_mode (`str`, *optional*, defaults to `"constant"`):
212
+ The padding mode to use when padding the image.
213
+ do_normalize (`bool`, *optional*, defaults to `True`):
214
+ Whether to normalize the image.
215
+ image_mean (`float`, *optional*, defaults to 0.5):
216
+ The mean to use when normalizing the image.
217
+ image_std (`float`, *optional*, defaults to 0.5):
218
+ The standard deviation to use when normalizing the image.
219
+ do_rescale (`bool`, *optional*, defaults to `True`):
220
+ Whether to rescale the image.
221
+ rescale_factor (`float`, *optional*, defaults to `1 / 255`):
222
+ The factor to use when rescaling the image.
223
+ patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`):
224
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
225
+ """
226
+
227
+ model_input_names = [
228
+ "images",
229
+ "image_input_ids",
230
+ "image_patches",
231
+ "image_patch_indices_per_batch",
232
+ "image_patch_indices_per_subsequence",
233
+ ]
234
+
235
+ def __init__(
236
+ self,
237
+ do_resize: bool = True,
238
+ size: Optional[Dict[str, int]] = None,
239
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
240
+ do_pad: bool = True,
241
+ padding_value: float = 1.0,
242
+ padding_mode: str = "constant",
243
+ do_normalize: bool = True,
244
+ image_mean: Union[float, List[float]] = 0.5,
245
+ image_std: Union[float, List[float]] = 0.5,
246
+ do_rescale: bool = True,
247
+ rescale_factor: float = 1 / 255,
248
+ patch_size: Optional[Dict[str, int]] = None,
249
+ **kwargs,
250
+ ):
251
+ super().__init__(**kwargs)
252
+ self.do_resize = do_resize
253
+ self.size = size if size is not None else {"height": 1080, "width": 1920}
254
+ self.resample = resample
255
+ self.do_pad = do_pad
256
+ self.padding_value = padding_value
257
+ self.padding_mode = padding_mode
258
+ self.do_normalize = do_normalize
259
+ self.image_mean = image_mean
260
+ self.image_std = image_std
261
+ self.do_rescale = do_rescale
262
+ self.rescale_factor = rescale_factor
263
+ self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30}
264
+ self._valid_processor_keys = [
265
+ "images",
266
+ "do_resize",
267
+ "size",
268
+ "resample",
269
+ "do_pad",
270
+ "padding_value",
271
+ "padding_mode",
272
+ "do_normalize",
273
+ "image_mean",
274
+ "image_std",
275
+ "do_rescale",
276
+ "rescale_factor",
277
+ "patch_size",
278
+ "return_tensors",
279
+ "data_format",
280
+ "input_data_format",
281
+ ]
282
+
283
+ def resize(
284
+ self,
285
+ image: np.ndarray,
286
+ size: Dict[str, int],
287
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
288
+ data_format: Optional[Union[str, ChannelDimension]] = None,
289
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
290
+ **kwargs,
291
+ ) -> np.ndarray:
292
+ """
293
+ Resize an image to `(size["height"], size["width"])`.
294
+
295
+ Args:
296
+ image (`np.ndarray`):
297
+ Image to resize.
298
+ size (`Dict[str, int]`):
299
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
300
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
301
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
302
+ data_format (`ChannelDimension` or `str`, *optional*):
303
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
304
+ image is used. Can be one of:
305
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
306
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
307
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
308
+ input_data_format (`ChannelDimension` or `str`, *optional*):
309
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
310
+ from the input image. Can be one of:
311
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
312
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
313
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
314
+
315
+ Returns:
316
+ `np.ndarray`: The resized image.
317
+ """
318
+ image_height, image_width = get_image_size(image, input_data_format)
319
+ target_height, target_width = size["height"], size["width"]
320
+
321
+ if image_width <= target_width and image_height <= target_height:
322
+ return image
323
+
324
+ height_scale_factor = target_height / image_height
325
+ width_scale_factor = target_width / image_width
326
+ optimal_scale_factor = min(height_scale_factor, width_scale_factor)
327
+
328
+ new_height = int(image_height * optimal_scale_factor)
329
+ new_width = int(image_width * optimal_scale_factor)
330
+
331
+ scaled_image = resize(
332
+ image=image,
333
+ size=(new_height, new_width),
334
+ resample=resample,
335
+ data_format=data_format,
336
+ input_data_format=input_data_format,
337
+ **kwargs,
338
+ )
339
+ return scaled_image
340
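A worked example of the scaling arithmetic above (plain Python, assuming only the default 1080x1920 target): the smaller of the two scale factors is used so the resized image fits inside the target in both dimensions.

```python
image_height, image_width = 2160, 2160        # example input larger than the target
target_height, target_width = 1080, 1920

scale = min(target_height / image_height, target_width / image_width)  # min(0.5, 0.888...) = 0.5
new_height, new_width = int(image_height * scale), int(image_width * scale)
print(new_height, new_width)  # 1080 1080 -- aspect ratio preserved; padding later fills to 1080x1920
```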
+
341
+ def pad_image(
342
+ self,
343
+ image: np.ndarray,
344
+ size: Dict[str, int],
345
+ mode: str = "constant",
346
+ constant_values: float = 1.0,
347
+ data_format: Optional[Union[str, ChannelDimension]] = None,
348
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
349
+ ) -> np.ndarray:
350
+ """
351
+ Pad an image to `(size["height"], size["width"])`.
352
+
353
+ Args:
354
+ image (`np.ndarray`):
355
+ Image to pad.
356
+ size (`Dict[str, int]`):
357
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
358
+ data_format (`ChannelDimension` or `str`, *optional*):
359
+ The data format of the output image. If unset, the same format as the input image is used.
360
+ input_data_format (`ChannelDimension` or `str`, *optional*):
361
+ The channel dimension format of the input image. If not provided, it will be inferred.
362
+ """
363
+ image_height, image_width = get_image_size(image, input_data_format)
364
+ target_height, target_width = size["height"], size["width"]
365
+ padding_top = 0
366
+ padding_left = 0
367
+ padding_bottom = target_height - image_height
368
+ padding_right = target_width - image_width
369
+ padded_image = pad(
370
+ image,
371
+ padding=((padding_top, padding_bottom), (padding_left, padding_right)),
372
+ mode=mode,
373
+ constant_values=constant_values,
374
+ data_format=data_format,
375
+ input_data_format=input_data_format,
376
+ )
377
+ return padded_image
378
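A small sketch of what `pad_image` does, using `np.pad` directly as a stand-in for the library's `pad` helper (an assumption made only to keep the snippet self-contained): padding is applied only on the bottom and right edges.

```python
import numpy as np

image = np.ones((3, 540, 960), dtype=np.float32)  # channels-first image, half the target size
target_height, target_width = 1080, 1920

padding = ((0, target_height - 540), (0, target_width - 960))  # (top, bottom), (left, right)
padded = np.pad(image, ((0, 0), *padding), mode="constant", constant_values=1.0)
print(padded.shape)  # (3, 1080, 1920)
```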
+
379
+ def preprocess(
380
+ self,
381
+ images,
382
+ do_resize: Optional[bool] = None,
383
+ size: Optional[Dict[str, int]] = None,
384
+ resample: Optional[PILImageResampling] = None,
385
+ do_pad: Optional[bool] = None,
386
+ padding_value: Optional[float] = None,
387
+ padding_mode: Optional[str] = None,
388
+ do_normalize: Optional[bool] = None,
389
+ image_mean: Optional[float] = None,
390
+ image_std: Optional[float] = None,
391
+ do_rescale: Optional[bool] = None,
392
+ rescale_factor: Optional[float] = None,
393
+ patch_size: Optional[Dict[str, int]] = None,
394
+ data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
395
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
396
+ return_tensors: Optional[TensorType] = None,
397
+ ):
398
+ """
399
+
400
+ Utility function to preprocess the images and extract necessary information about original formats.
401
+
402
+ Args:
403
+ images (`ImageInput`):
404
+ Images to preprocess. Expects a single image, a list of images, or a list of lists of images. Pixel
405
+ values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`.
406
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
407
+ Whether to resize the image to `size`.
408
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
409
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
410
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
411
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
412
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
413
+ Whether to pad the image to `size`.
414
+ padding_value (`float`, *optional*, defaults to `self.padding_value`):
415
+ The value to pad the image with.
416
+ padding_mode (`str`, *optional*, defaults to `self.padding_mode`):
417
+ The padding mode to use when padding the image.
418
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
419
+ Whether to normalize the image.
420
+ image_mean (`float`, *optional*, defaults to `self.image_mean`):
421
+ The mean to use when normalizing the image.
422
+ image_std (`float`, *optional*, defaults to `self.image_std`):
423
+ The standard deviation to use when normalizing the image.
424
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
425
+ Whether to rescale the image.
426
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
427
+ The factor to use when rescaling the image.
428
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
429
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
430
+ return_tensors (`str` or `TensorType`, *optional*):
431
+ The type of tensors to return. Can be one of:
432
+ - Unset: Return a list of `np.ndarray`.
433
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
434
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
435
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
436
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
437
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
438
+ The channel dimension format of the output image. Can be one of:
439
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
440
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
441
+ input_data_format (`ChannelDimension` or `str`, *optional*):
442
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
443
+ from the input image. Can be one of:
444
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
445
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
446
+ """
447
+
448
+ do_resize = do_resize if do_resize is not None else self.do_resize
449
+ size = size if size is not None else self.size
450
+ resample = resample if resample is not None else self.resample
451
+ do_pad = do_pad if do_pad is not None else self.do_pad
452
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
453
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
454
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
455
+ image_mean = image_mean if image_mean is not None else self.image_mean
456
+ image_std = image_std if image_std is not None else self.image_std
457
+ padding_value = padding_value if padding_value is not None else self.padding_value
458
+ padding_mode = padding_mode if padding_mode is not None else self.padding_mode
461
+ patch_size = patch_size if patch_size is not None else self.patch_size
462
+
463
+ if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images):
464
+ raise ValueError("Multiple images for a single sample are not yet supported.")
465
+
466
+ batch_images = make_list_of_list_of_images(images)
467
+
468
+ validate_preprocess_arguments(
469
+ do_rescale=do_rescale,
470
+ rescale_factor=rescale_factor,
471
+ do_normalize=do_normalize,
472
+ image_mean=image_mean,
473
+ image_std=image_std,
474
+ do_pad=do_pad,
475
+ size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
476
+ do_resize=do_resize,
477
+ size=size,
478
+ resample=resample,
479
+ )
480
+ # All transformations expect numpy arrays.
481
+ batch_images = [[to_numpy_array(image) for image in images] for images in batch_images]
482
+
483
+ if is_scaled_image(batch_images[0][0]) and do_rescale:
484
+ logger.warning_once(
485
+ "It looks like you are trying to rescale already rescaled images. If the input"
486
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
487
+ )
488
+
489
+ if input_data_format is None:
490
+ # We assume that all images have the same channel dimension format.
491
+ input_data_format = infer_channel_dimension_format(batch_images[0][0])
492
+
493
+ original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
494
+
495
+ if do_resize:
496
+ batch_images = [
497
+ [self.resize(image, size=size, input_data_format=input_data_format) for image in images]
498
+ for images in batch_images
499
+ ]
500
+
501
+ image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
502
+ image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]
503
+ image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]
504
+
505
+ # scale_h is the same as scale_w
506
+ image_scale_factors = [
507
+ [resized_size[0] / original_size[0]]
508
+ for original_size, resized_size in zip(original_image_sizes, image_sizes)
509
+ ]
510
+
511
+ if do_pad:
512
+ batch_images = [
513
+ [
514
+ self.pad_image(
515
+ image,
516
+ size=size,
517
+ mode=padding_mode,
518
+ constant_values=padding_value,
519
+ input_data_format=input_data_format,
520
+ )
521
+ for image in images
522
+ ]
523
+ for images in batch_images
524
+ ]
525
+
526
+ if do_rescale:
527
+ batch_images = [
528
+ [self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
529
+ for images in batch_images
530
+ ]
531
+
532
+ if do_normalize:
533
+ batch_images = [
534
+ [
535
+ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
536
+ for image in images
537
+ ]
538
+ for images in batch_images
539
+ ]
540
+
541
+ if data_format is not None:
542
+ batch_images = [
543
+ [to_channel_dimension_format(image, data_format, input_data_format) for image in images]
544
+ for images in batch_images
545
+ ]
546
+
547
+ data = {
548
+ "images": batch_images,
549
+ "image_unpadded_heights": image_unpadded_heights,
550
+ "image_unpadded_widths": image_unpadded_widths,
551
+ "image_scale_factors": image_scale_factors,
552
+ }
553
+ return FuyuBatchFeature(data=data, tensor_type=return_tensors)
554
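An end-to-end usage sketch for `preprocess`, assuming the class is importable from the top-level `transformers` package and instantiated with its defaults (not a verbatim example from this diff):

```python
import numpy as np
from PIL import Image

from transformers import FuyuImageProcessor

processor = FuyuImageProcessor()
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

batch = processor.preprocess(image, return_tensors="pt")
print(batch["images"][0][0].shape)      # torch.Size([3, 1080, 1920]) after padding
print(batch["image_unpadded_heights"])  # height before padding (480 here; no resize was needed)
print(batch["image_scale_factors"])     # 1.0 when the image already fits within 1080x1920
```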
+
555
+ def get_num_patches(self, image_height: int, image_width: int, patch_size: Dict[str, int] = None) -> int:
556
+ """
557
+ Calculate number of patches required to encode an image.
558
+
559
+ Args:
560
+ image_height (`int`):
561
+ Height of the image.
562
+ image_width (`int`):
563
+ Width of the image.
564
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
565
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
566
+ """
567
+ patch_size = patch_size if patch_size is not None else self.patch_size
568
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
569
+
570
+ if image_height % patch_height != 0:
571
+ raise ValueError(f"{image_height=} must be divisible by {patch_height}")
572
+ if image_width % patch_width != 0:
573
+ raise ValueError(f"{image_width=} must be divisible by {patch_width}")
574
+
575
+ num_patches_per_dim_h = image_height // patch_height
576
+ num_patches_per_dim_w = image_width // patch_width
577
+ num_patches = num_patches_per_dim_h * num_patches_per_dim_w
578
+ return num_patches
579
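Quick arithmetic for the default sizes (a sketch; both dimensions must be divisible by the patch size, which holds for the padded 1080x1920 output):

```python
patch_height, patch_width = 30, 30        # default patch_size
image_height, image_width = 1080, 1920    # default padded image size

num_patches = (image_height // patch_height) * (image_width // patch_width)
print(num_patches)  # 36 * 64 = 2304 patches per full-size image
```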
+
580
+ def patchify_image(self, image: "torch.Tensor", patch_size: Optional[Dict[str, int]] = None) -> "torch.Tensor":
581
+ """
582
+ Convert an image into a tensor of patches.
583
+
584
+ Args:
585
+ image (`torch.Tensor`):
586
+ Image to convert. Shape: [batch, channels, height, width]
587
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
588
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
589
+ """
590
+ requires_backends(self, ["torch"])
591
+ patch_size = patch_size if patch_size is not None else self.patch_size
592
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
593
+
594
+ # TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871
595
+ # torch implementation is faster but does not handle non-squares
596
+
597
+ batch_size, channels, _, _ = image.shape
598
+ unfolded_along_height = image.unfold(2, patch_height, patch_height)
599
+ patches = unfolded_along_height.unfold(3, patch_width, patch_width)
600
+ patches = patches.contiguous()
601
+ patches = patches.view(batch_size, channels, -1, patch_height, patch_width)
602
+ patches = patches.permute(0, 2, 3, 4, 1)
603
+ patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)
604
+ return patches
605
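A shape-level sketch of the unfold-based patchification above (requires torch; it mirrors the method body rather than calling it):

```python
import torch

image = torch.randn(1, 3, 1080, 1920)   # [batch, channels, height, width]
ph, pw = 30, 30

patches = image.unfold(2, ph, ph).unfold(3, pw, pw)    # [1, 3, 36, 64, 30, 30]
patches = patches.contiguous().view(1, 3, -1, ph, pw)  # [1, 3, 2304, 30, 30]
patches = patches.permute(0, 2, 3, 4, 1).reshape(1, -1, 3 * ph * pw)
print(patches.shape)  # torch.Size([1, 2304, 2700])
```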
+
606
+ def preprocess_with_tokenizer_info(
607
+ self,
608
+ image_input: "torch.Tensor",
609
+ image_present: "torch.Tensor",
610
+ image_unpadded_h: "torch.Tensor",
611
+ image_unpadded_w: "torch.Tensor",
612
+ image_placeholder_id: int,
613
+ image_newline_id: int,
614
+ variable_sized: bool,
615
+ patch_size: Optional[Dict[str, int]] = None,
616
+ ) -> FuyuBatchFeature:
617
+ """Process images for model input. In particular, variable-sized images are handled here.
618
+
619
+ Args:
620
+ image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):
621
+ Tensor of images padded to model input size.
622
+ image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):
623
+ Tensor of 1s and 0s indicating whether an image is present.
624
+ image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):
625
+ Tensor of unpadded image heights.
626
+ image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):
627
+ Tensor of unpadded image widths.
628
+ image_placeholder_id (int):
629
+ The id of the image placeholder token. Comes from an associated tokenizer.
630
+ image_newline_id (int):
631
+ The id of the image newline token. Comes from an associated tokenizer.
632
+ variable_sized (bool):
633
+ Whether to process images as variable-sized.
634
+ patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):
635
+ Size of the patches.
636
+ """
637
+ requires_backends(self, ["torch"])
638
+
639
+ patch_size = patch_size if patch_size is not None else self.patch_size
640
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
641
+
642
+ # Only images that are present.
643
+ images: List[List[torch.Tensor]] = []
644
+ batch_image_patches: List[List[torch.Tensor]] = []
645
+ # Image input ids for every subsequence, including ones with no image present.
646
+ batch_image_input_ids: List[List[torch.Tensor]] = []
647
+ for batch_index in range(image_input.shape[0]):
648
+ image_input_ids = []
649
+ image_patches = []
650
+ for subseq_index in range(image_input.shape[1]):
651
+ if image_present[batch_index, subseq_index]:
652
+ image = image_input[batch_index, subseq_index]
653
+ image_height, image_width = image.shape[1], image.shape[2]
654
+ if variable_sized:
655
+ # The min() is required here due to floating point issues:
656
+ # math.ceil(torch.tensor(300).cuda() / 30) == 11
657
+ new_h = min(
658
+ image_height,
659
+ math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height,
660
+ )
661
+ new_w = min(
662
+ image_width,
663
+ math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width,
664
+ )
665
+ image = image[:, :new_h, :new_w]
666
+ image_height, image_width = new_h, new_w
667
+
668
+ num_patches = self.get_num_patches(image_height=image_height, image_width=image_width)
669
+ tensor_of_image_ids = torch.full(
670
+ [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device
671
+ )
672
+ patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0)
673
+ assert num_patches == patches.shape[0]
674
+
675
+ if variable_sized:
676
+ # Now terminate each line with |NEWLINE|.
677
+ tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)
678
+ newline_ids = torch.full(
679
+ [tensor_of_image_ids.shape[0], 1],
680
+ image_newline_id,
681
+ dtype=torch.int32,
682
+ device=image_input.device,
683
+ )
684
+ tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)
685
+ tensor_of_image_ids = tensor_of_image_ids.reshape(-1)
686
+
687
+ images.append([image])
688
+ image_input_ids.append(tensor_of_image_ids)
689
+ image_patches.append(patches)
690
+ else:
691
+ image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))
692
+
693
+ batch_image_input_ids.append(image_input_ids)
694
+ batch_image_patches.append(image_patches)
695
+
696
+ # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in
697
+ # the stream.
698
+ image_patch_indices_per_batch: List[List[torch.Tensor]] = []
699
+ image_patch_indices_per_subsequence: List[List[torch.Tensor]] = []
700
+
701
+ for sample_image_input_ids in batch_image_input_ids:
702
+ index_offset = 0
703
+ per_batch_indices = []
704
+ per_subsequence_indices = []
705
+ for subseq_image_input_ids in sample_image_input_ids:
706
+ # Indices of image patches.
707
+ patches_mask = subseq_image_input_ids == image_placeholder_id
708
+ num_patches = torch.count_nonzero(patches_mask)
709
+ indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(
710
+ subseq_image_input_ids
711
+ )
712
+
713
+ # Place those indices in the image input ids token stream, with -1 representing non-index tokens.
714
+ indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)
715
+ indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)
716
+ patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]
717
+
718
+ indices_in_stream_per_batch[patches_inds] = indices + index_offset
719
+ indices_in_stream_per_subsequence[patches_inds] = indices
720
+
721
+ per_batch_indices.append(indices_in_stream_per_batch)
722
+ per_subsequence_indices.append(indices_in_stream_per_subsequence)
723
+ index_offset += num_patches
724
+
725
+ image_patch_indices_per_batch.append(per_batch_indices)
726
+ image_patch_indices_per_subsequence.append(per_subsequence_indices)
727
+
728
+ return FuyuBatchFeature(
729
+ data={
730
+ "images": images,
731
+ "image_input_ids": batch_image_input_ids,
732
+ "image_patches": batch_image_patches,
733
+ "image_patch_indices_per_batch": image_patch_indices_per_batch,
734
+ "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence,
735
+ }
736
+ )
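A sketch of the resulting token layout for one variable-sized image. The placeholder and newline ids below (71011, 71019) are illustrative stand-ins, not values taken from this diff; in practice they come from the associated tokenizer.

```python
import torch

from transformers import FuyuImageProcessor

processor = FuyuImageProcessor()
out = processor.preprocess_with_tokenizer_info(
    image_input=torch.rand(1, 1, 3, 1080, 1920),  # [batch, subsequence, C, H, W]
    image_present=torch.ones(1, 1, 1),
    image_unpadded_h=torch.tensor([[450]]),        # content is 450 x 660 before padding
    image_unpadded_w=torch.tensor([[660]]),
    image_placeholder_id=71011,                    # hypothetical ids
    image_newline_id=71019,
    variable_sized=True,
)
# 450 -> 15 rows of patches, 660 -> 22 patches per row, plus 1 newline id per row.
print(out["image_input_ids"][0][0].shape)  # torch.Size([345]) == 15 * (22 + 1)
```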
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/modeling_fuyu.py ADDED
@@ -0,0 +1,358 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Fuyu model."""
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.utils.checkpoint
20
+ from torch import nn
21
+
22
+ from ...modeling_outputs import CausalLMOutputWithPast
23
+ from ...modeling_utils import PreTrainedModel
24
+ from ...models.auto.modeling_auto import AutoModelForCausalLM
25
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
26
+ from .configuration_fuyu import FuyuConfig
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ _CONFIG_FOR_DOC = "FuyuConfig"
32
+
33
+
34
+ FUYU_START_DOCSTRING = r"""
35
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
36
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
37
+ etc.)
38
+
39
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
40
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
41
+ and behavior.
42
+
43
+ Parameters:
44
+ config ([`FuyuConfig`]):
45
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
46
+ load the weights associated with the model, only the configuration. Check out the
47
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
48
+ """
49
+
50
+
51
+ @add_start_docstrings(
52
+ "The bare Fuyu Model outputting raw hidden-states without any specific head on top.",
53
+ FUYU_START_DOCSTRING,
54
+ )
55
+ class FuyuPreTrainedModel(PreTrainedModel):
56
+ config_class = FuyuConfig
57
+ base_model_prefix = "fuyu"
58
+ supports_gradient_checkpointing = True
59
+ _no_split_modules = []
60
+ _skip_keys_device_placement = "past_key_values"
61
+
62
+ def _init_weights(self, module):
63
+ std = self.config.initializer_range
64
+ if isinstance(module, nn.Linear):
65
+ module.weight.data.normal_(mean=0.0, std=std)
66
+ if module.bias is not None:
67
+ module.bias.data.zero_()
68
+ elif isinstance(module, nn.Embedding):
69
+ module.weight.data.normal_(mean=0.0, std=std)
70
+ if module.padding_idx is not None:
71
+ module.weight.data[module.padding_idx].zero_()
72
+
73
+
74
+ FUYU_INPUTS_DOCSTRING = r"""
75
+ Args:
76
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
77
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
78
+ it.
79
+
80
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
81
+ [`PreTrainedTokenizer.__call__`] for details.
82
+
83
+ [What are input IDs?](../glossary#input-ids)
84
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
85
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
86
+
87
+ - 1 for tokens that are **not masked**,
88
+ - 0 for tokens that are **masked**.
89
+
90
+ [What are attention masks?](../glossary#attention-mask)
91
+
92
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
93
+ [`PreTrainedTokenizer.__call__`] for details.
94
+
95
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
96
+ `past_key_values`).
97
+
98
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
99
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
100
+ information on the default strategy.
101
+
102
+ - 1 indicates the head is **not masked**,
103
+ - 0 indicates the head is **masked**.
104
+ image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size x patch_size x num_channels)`, *optional*):
105
+ Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
106
+ hidden size of the model.
107
+ image_patches_indices (`torch.LongTensor` of shape `(batch_size, num_total_patches + number_of_newline_tokens + number_of_text_tokens, patch_size x patch_size x num_channels)`, *optional*):
108
+ Indices indicating at which position the image_patches have to be inserted in input_embeds.
109
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
110
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
111
+ config.n_positions - 1]`.
112
+
113
+ [What are position IDs?](../glossary#position-ids)
114
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
115
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
116
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
117
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
118
+
119
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
120
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
121
+
122
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
123
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
124
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
125
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
126
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
127
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
128
+ model's internal embedding lookup matrix.
129
+ use_cache (`bool`, *optional*):
130
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
131
+ `past_key_values`).
132
+ output_attentions (`bool`, *optional*):
133
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
134
+ tensors for more detail.
135
+ output_hidden_states (`bool`, *optional*):
136
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
137
+ more detail.
138
+ return_dict (`bool`, *optional*):
139
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
140
+ """
141
+
142
+
143
+ @add_start_docstrings(
144
+ "Fuyu Model with a language modeling head on top for causal language modeling conditioned on image patches and text.",
145
+ FUYU_START_DOCSTRING,
146
+ )
147
+ class FuyuForCausalLM(FuyuPreTrainedModel):
148
+ def __init__(self, config: FuyuConfig):
149
+ super().__init__(config)
150
+ self.padding_idx = config.pad_token_id
151
+ self.vocab_size = config.vocab_size
152
+ self.language_model = AutoModelForCausalLM.from_config(config.text_config)
153
+
154
+ self.vision_embed_tokens = nn.Linear(
155
+ config.patch_size * config.patch_size * config.num_channels, config.hidden_size
156
+ )
157
+
158
+ self.gradient_checkpointing = False
159
+ # Initialize weights and apply final processing
160
+ self.post_init()
161
+
162
+ def get_input_embeddings(self):
163
+ return self.language_model.get_input_embeddings()
164
+
165
+ def set_input_embeddings(self, value):
166
+ self.language_model.set_input_embeddings(value)
167
+
168
+ def gather_continuous_embeddings(
169
+ self,
170
+ word_embeddings: torch.Tensor,
171
+ continuous_embeddings: List[torch.Tensor],
172
+ image_patch_input_indices: torch.Tensor,
173
+ ) -> torch.Tensor:
174
+ """This function places the continuous_embeddings into the word_embeddings at the locations
175
+ indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous
176
+ embeddings.
177
+
178
+ Args:
179
+ word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
180
+ Tensor of word embeddings.
181
+ continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
182
+ Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape
183
+ [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative
184
+ indices in image_patch_input_indices for that batch element.
185
+ image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
186
+ Tensor of indices of the image patches in the input_ids tensor.
187
+ """
188
+ if not (word_embeddings.shape[0] == len(continuous_embeddings)):
189
+ raise ValueError(
190
+ f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}"
191
+ )
192
+
193
+ output_embeddings = word_embeddings.clone()
194
+ for batch_idx in range(word_embeddings.shape[0]):
195
+ # First, find the positions of all the non-negative values in image_patch_input_indices, those are the
196
+ # positions in word_embeddings that we want to replace with content from continuous_embeddings.
197
+ dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0]
198
+ # Next look up those indices in image_patch_input_indices to find the indices in continuous_embeddings that we
199
+ # want to use to replace the values in word_embeddings.
200
+ src_indices = image_patch_input_indices[batch_idx][dst_indices]
201
+ # Check if we have more indices than embeddings. Note that we could have fewer indices if images got truncated.
202
+ if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:
203
+ raise ValueError(
204
+ f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match "
205
+ f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}."
206
+ )
207
+ output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices]
208
+ return output_embeddings
209
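A toy-tensor sketch of the scatter performed above (values chosen only to make the replacement visible; it mirrors the method body rather than calling the model):

```python
import torch

batch_size, seq_len, hidden = 1, 6, 4
word_embeddings = torch.zeros(batch_size, seq_len, hidden)

# Sequence positions 1, 2, 3 should receive patch embeddings 0, 1, 2; -1 marks text tokens.
image_patch_input_indices = torch.tensor([[-1, 0, 1, 2, -1, -1]])
continuous_embeddings = [torch.ones(3, hidden)]

output = word_embeddings.clone()
dst = torch.nonzero(image_patch_input_indices[0] >= 0, as_tuple=True)[0]  # tensor([1, 2, 3])
src = image_patch_input_indices[0][dst]                                   # tensor([0, 1, 2])
output[0, dst] = continuous_embeddings[0][src]
print(output[0, :, 0])  # tensor([0., 1., 1., 1., 0., 0.])
```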
+
210
+ @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING)
211
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
212
+ def forward(
213
+ self,
214
+ input_ids: torch.LongTensor = None,
215
+ image_patches: torch.Tensor = None, # [batch_size, num_total_patches, patch_size x patch_size x num_channels]
216
+ image_patches_indices: torch.Tensor = None,
217
+ attention_mask: Optional[torch.Tensor] = None,
218
+ position_ids: Optional[torch.LongTensor] = None,
219
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
220
+ inputs_embeds: Optional[torch.FloatTensor] = None,
221
+ use_cache: Optional[bool] = None,
222
+ labels: Optional[torch.Tensor] = None,
223
+ output_attentions: Optional[bool] = None,
224
+ output_hidden_states: Optional[bool] = None,
225
+ return_dict: Optional[bool] = None,
226
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
227
+ r"""
228
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
229
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
230
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
231
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
232
+
233
+ Returns:
234
+
235
+ Examples:
236
+
237
+ ```python
238
+ >>> from transformers import FuyuProcessor, FuyuForCausalLM
239
+ >>> from PIL import Image
240
+ >>> import requests
241
+
242
+ >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
243
+ >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")
244
+
245
+ >>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
246
+ >>> image = Image.open(requests.get(url, stream=True).raw)
247
+ >>> prompt = "Generate a coco-style caption.\n"
248
+
249
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
250
+ >>> outputs = model(**inputs)
251
+
252
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=7)
253
+ >>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
254
+ >>> print(generation_text[0])
255
+ A blue bus parked on the side of a road.
256
+ ```"""
257
+
258
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
259
+ output_hidden_states = (
260
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
261
+ )
262
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
263
+
264
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
265
+
266
+ if input_ids is not None and inputs_embeds is not None:
267
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
268
+ elif input_ids is not None:
269
+ batch_size, seq_length = input_ids.shape
270
+ elif inputs_embeds is not None:
271
+ batch_size, seq_length, _ = inputs_embeds.shape
272
+ else:
273
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
274
+
275
+ seq_length_with_past = seq_length
276
+ past_key_values_length = 0
277
+
278
+ if past_key_values is not None:
279
+ past_key_values_length = past_key_values[0][0].shape[2]
280
+ seq_length_with_past = seq_length_with_past + past_key_values_length
281
+
282
+ if position_ids is None:
283
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
284
+ position_ids = torch.arange(
285
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
286
+ )
287
+ position_ids = position_ids.unsqueeze(0)
288
+
289
+ if inputs_embeds is None:
290
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
291
+ if image_patches is not None and past_key_values is None:
292
+ patch_embeddings = [
293
+ self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype))
294
+ .squeeze(0)
295
+ .to(inputs_embeds.device)
296
+ for patch in image_patches
297
+ ]
298
+ inputs_embeds = self.gather_continuous_embeddings(
299
+ word_embeddings=inputs_embeds,
300
+ continuous_embeddings=patch_embeddings,
301
+ image_patch_input_indices=image_patches_indices,
302
+ )
303
+
304
+ outputs = self.language_model(
305
+ inputs_embeds=inputs_embeds,
306
+ attention_mask=attention_mask,
307
+ position_ids=position_ids,
308
+ past_key_values=past_key_values,
309
+ output_attentions=output_attentions,
310
+ output_hidden_states=output_hidden_states,
311
+ labels=labels,
312
+ use_cache=use_cache,
313
+ return_dict=return_dict,
314
+ )
315
+
316
+ return outputs
317
+
318
+ def prepare_inputs_for_generation(
319
+ self,
320
+ input_ids,
321
+ past_key_values=None,
322
+ attention_mask=None,
323
+ inputs_embeds=None,
324
+ image_patches=None,
325
+ image_patches_indices=None,
326
+ **kwargs,
327
+ ):
328
+ if past_key_values:
329
+ input_ids = input_ids[:, -1:]
330
+
331
+ position_ids = kwargs.get("position_ids", None)
332
+ if attention_mask is not None and position_ids is None:
333
+ # create position_ids on the fly for batch generation
334
+ position_ids = attention_mask.long().cumsum(-1) - 1
335
+ position_ids.masked_fill_(attention_mask == 0, 1)
336
+ if past_key_values:
337
+ position_ids = position_ids[:, -1].unsqueeze(-1)
338
+
339
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
340
+ if inputs_embeds is not None and past_key_values is None:
341
+ model_inputs = {"inputs_embeds": inputs_embeds}
342
+ else:
343
+ model_inputs = {"input_ids": input_ids}
344
+
345
+ if image_patches_indices is not None:
346
+ model_inputs["image_patches_indices"] = image_patches_indices
347
+
348
+ model_inputs.update(
349
+ {
350
+ "position_ids": position_ids,
351
+ "past_key_values": past_key_values,
352
+ "use_cache": kwargs.get("use_cache"),
353
+ "attention_mask": attention_mask,
354
+ "image_patches_indices": image_patches_indices if past_key_values is None else None,
355
+ "image_patches": image_patches if past_key_values is None else None,
356
+ }
357
+ )
358
+ return model_inputs
llmeval-env/lib/python3.10/site-packages/transformers/models/fuyu/processing_fuyu.py ADDED
@@ -0,0 +1,694 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for Fuyu
17
+ """
18
+ import re
19
+ from typing import Dict, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...tokenization_utils_base import PaddingStrategy, TruncationStrategy
25
+ from ...utils import TensorType, is_torch_available, logging, requires_backends
26
+
27
+
28
+ if is_torch_available():
29
+ from .image_processing_fuyu import FuyuBatchFeature
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ if is_torch_available():
36
+ import torch
37
+
38
+
39
+ TEXT_REPR_BBOX_OPEN = "<box>"
40
+ TEXT_REPR_BBOX_CLOSE = "</box>"
41
+ TEXT_REPR_POINT_OPEN = "<point>"
42
+ TEXT_REPR_POINT_CLOSE = "</point>"
43
+
44
+ TOKEN_BBOX_OPEN_STRING = "<0x00>" # <bbox>
45
+ TOKEN_BBOX_CLOSE_STRING = "<0x01>" # </bbox>
46
+ TOKEN_POINT_OPEN_STRING = "<0x02>" # <point>
47
+ TOKEN_POINT_CLOSE_STRING = "<0x03>" # </point>
48
+ BEGINNING_OF_ANSWER_STRING = "<0x04>" # <boa>
49
+
50
+
51
+ def full_unpacked_stream_to_tensor(
52
+ all_bi_tokens_to_place: List[int],
53
+ full_unpacked_stream: List["torch.Tensor"],
54
+ fill_value: int,
55
+ batch_size: int,
56
+ new_seq_len: int,
57
+ offset: int,
58
+ ) -> "torch.Tensor":
59
+ """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does
60
+ the required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
61
+ """
62
+
63
+ assert len(all_bi_tokens_to_place) == batch_size
64
+ assert len(full_unpacked_stream) == batch_size
65
+
66
+ # Create padded tensors for the full batch.
67
+ new_padded_tensor = torch.full(
68
+ [batch_size, new_seq_len],
69
+ fill_value=fill_value,
70
+ dtype=full_unpacked_stream[0].dtype,
71
+ device=full_unpacked_stream[0].device,
72
+ )
73
+
74
+ # Place each batch entry into the batch tensor.
75
+ for bi in range(batch_size):
76
+ tokens_to_place = all_bi_tokens_to_place[bi]
77
+ new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]
78
+
79
+ return new_padded_tensor
80
+
81
+
82
+ def construct_full_unpacked_stream(
83
+ num_real_text_tokens: Union[List[List[int]], "torch.Tensor"],
84
+ input_stream: "torch.Tensor",
85
+ image_tokens: List[List["torch.Tensor"]],
86
+ batch_size: int,
87
+ num_sub_sequences: int,
88
+ ) -> List["torch.Tensor"]:
89
+ """Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required
90
+ padding to account for images and then unpacks the subsequences to create a single sequence per item in the batch.
91
+ Returns a list of tensors, one for each item in the batch."""
92
+
93
+ all_bi_stream = []
94
+
95
+ for batch_index in range(batch_size):
96
+ all_si_stream = []
97
+
98
+ # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence
99
+ # and append to lists. We use lists rather than tensors because each subsequence is variable-sized.
100
+ # TODO Remove this logic in a subsequent release since subsequences are not supported.
101
+ image_adjustment = image_tokens[batch_index][0]
102
+ subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0)
103
+ num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0]
104
+ all_si_stream.append(subsequence_stream[:num_real_tokens])
105
+ all_bi_stream.append(torch.cat(all_si_stream, dim=0))
106
+
107
+ return all_bi_stream
108
+
109
+
110
+ def _replace_string_repr_with_token_tags(prompt: str) -> str:
111
+ prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING)
112
+ prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING)
113
+ prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING)
114
+ prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING)
115
+ return prompt
116
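A quick string-level illustration of the replacement above; importing the private helper is an assumption based on the file path in this diff:

```python
from transformers.models.fuyu.processing_fuyu import _replace_string_repr_with_token_tags

prompt = "Click <point>120, 80</point> inside <box>10, 20, 30, 40</box>"
print(_replace_string_repr_with_token_tags(prompt))
# Click <0x02>120, 80<0x03> inside <0x00>10, 20, 30, 40<0x01>
```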
+
117
+
118
+ def _segment_prompt_into_text_token_conversions(prompt: str) -> List:
119
+ """
120
+ Given a string prompt, converts the prompt into a list of TextTokenConversions.
121
+ """
122
+ # Wherever we notice a [TOKEN_OPEN_STRING, TOKEN_CLOSE_STRING] pair, we split the prompt
123
+ prompt_text_list: List = []
124
+ regex_pattern = re.compile(
125
+ f"({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})"
126
+ )
127
+ # Split by the regex pattern
128
+ prompt_split = regex_pattern.split(prompt)
129
+ for i, elem in enumerate(prompt_split):
130
+ if len(elem) == 0 or elem in [
131
+ TOKEN_BBOX_OPEN_STRING,
132
+ TOKEN_BBOX_CLOSE_STRING,
133
+ TOKEN_POINT_OPEN_STRING,
134
+ TOKEN_POINT_CLOSE_STRING,
135
+ ]:
136
+ continue
137
+ prompt_text_list.append(
138
+ (elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING])
139
+ )
140
+ return prompt_text_list
141
+
142
+
143
+ def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]:
144
+ """
145
+ This function transforms the prompt in the following fashion:
146
+ - <box> <point> and </box> </point> to their respective token mappings
147
+ - extract the coordinates from the tag
148
+ - transform the coordinates into the transformed image space
149
+ - return the prompt tokens with the transformed coordinates and new tags
150
+
151
+ Bounding boxes and points MUST be in the following format: <box>y1, x1, y2, x2</box> <point>x, y</point> The spaces
152
+ and punctuation added above are NOT optional.
153
+ """
154
+ # Make a namedtuple that stores "text" and "is_bbox"
155
+
156
+ # We want to do the following: Tokenize the code normally -> when we see a point or box, tokenize using the tokenize_within_tag function
157
+ # When point or box close tag, continue tokenizing normally
158
+ # First, we replace the point and box tags with their respective tokens
159
+ prompt = _replace_string_repr_with_token_tags(prompt)
160
+ # Tokenize the prompt
161
+ # Convert prompt into a list split
162
+ prompt_text_list = _segment_prompt_into_text_token_conversions(prompt)
163
+ transformed_prompt_tokens: List[int] = []
164
+ for elem in prompt_text_list:
165
+ if elem[1]:
166
+ # This is a location, we need to tokenize it
167
+ within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer)
168
+ # Surround the text with the open and close tags
169
+ transformed_prompt_tokens.extend(within_tag_tokenized)
170
+ else:
171
+ transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids)
172
+ return transformed_prompt_tokens
173
+
174
+
175
+ def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]:
176
+ """
177
+ Given a bounding box of the fashion <box>1, 2, 3, 4</box> | <point>1, 2</point> This function is responsible for
178
+ converting 1, 2, 3, 4 into tokens of 1 2 3 4 without any commas.
179
+ """
180
+ # Convert the text into a list of strings.
181
+ num_int_strs = text.split(",")
182
+ if len(num_int_strs) == 2:
183
+ # Two coordinates mean a point; otherwise the values are treated as a bounding box.
184
+ token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING]
185
+ token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING]
186
+ else:
187
+ token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING]
188
+ token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING]
189
+
190
+ # Strip whitespace and convert each coordinate to a float.
191
+ num_ints = [float(num.strip()) for num in num_int_strs]
192
+ # Scale to the transformed image size.
193
+ if len(num_ints) == 2:
194
+ num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor)
195
+ elif len(num_ints) == 4:
196
+ num_ints_translated = scale_bbox_to_transformed_image(
197
+ top=num_ints[0],
198
+ left=num_ints[1],
199
+ bottom=num_ints[2],
200
+ right=num_ints[3],
201
+ scale_factor=scale_factor,
202
+ )
203
+ else:
204
+ raise ValueError(f"Invalid number of ints: {len(num_ints)}")
205
+ # Look up the token id for each transformed coordinate directly in the vocab.
206
+ tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated]
207
+ return [token_space_open_string] + tokens + [token_space_close_string]
208
+
209
+
210
+ def _tokenize_prompts_with_image_and_batch(
211
+ tokenizer,
212
+ prompts: List[List[str]],
213
+ scale_factors: Optional[List[List["torch.Tensor"]]],
214
+ max_tokens_to_generate: int,
215
+ max_position_embeddings: int,
216
+ add_BOS: bool, # Same issue with types as above
217
+ add_beginning_of_answer_token: bool,
218
+ ) -> Tuple["torch.Tensor", "torch.Tensor"]:
219
+ """
220
+ Given a set of prompts and number of tokens to generate:
221
+ - tokenize prompts
222
+ - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate
223
+ - pad all the sequences to this length so we can convert them into a 3D tensor.
224
+ """
225
+
226
+ # If not tool use, transform the coordinates while tokenizing
227
+ if scale_factors is not None:
228
+ transformed_prompt_tokens = []
229
+ for prompt_seq, scale_factor_seq in zip(prompts, scale_factors):
230
+ transformed_prompt_tokens.append(
231
+ [
232
+ _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer)
233
+ for prompt, scale_factor in zip(prompt_seq, scale_factor_seq)
234
+ ]
235
+ )
236
+ else:
237
+ transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts]
238
+
239
+ prompts_tokens = transformed_prompt_tokens
240
+
241
+ if add_BOS:
242
+ bos_token = tokenizer.vocab["<s>"]
243
+ else:
244
+ bos_token = tokenizer.vocab["|ENDOFTEXT|"]
245
+ prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens]
246
+ if add_beginning_of_answer_token:
247
+ boa = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING]
248
+ # Only add the beginning-of-answer token to the last subsequence since that is what will be completed.
249
+ for token_seq in prompts_tokens:
250
+ token_seq[-1].append(boa)
251
+
252
+ # Now we have a list of lists of tokens, where each list has a different
253
+ # size. We want to extend these lists to:
254
+ # - incorporate the tokens that need to be generated
255
+ # - make all the sequences equal length.
256
+ # Get the prompts length.
257
+
258
+ prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens]
259
+ # Get the max prompts length.
260
+ max_prompt_len: int = np.max(prompts_length)
261
+ # Number of tokens in each sample of the batch.
262
+ samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings)
263
+ if max_prompt_len + max_tokens_to_generate > max_position_embeddings:
264
+ logger.warning(
265
+ f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate}",
266
+ f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible.",
267
+ )
268
+ # Now update the list of list to be of the same size: samples_length.
269
+ for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length):
270
+ for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq):
271
+ if len(prompt_tokens) > samples_length:
272
+ raise ValueError("Length of subsequence prompt exceeds sequence length.")
273
+ padding_size = samples_length - prompt_length
274
+ prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size)
275
+
276
+ # Now we are in a structured format, we can convert to tensors.
277
+ prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64)
278
+ prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64)
279
+
280
+ return prompts_tokens_tensor, prompts_length_tensor
281
+
282
+
283
+ # Simplified assuming self.crop_top = self.padding_top = 0
284
+ def original_to_transformed_h_coords(original_coords, scale_h):
285
+ return np.round(original_coords * scale_h).astype(np.int32)
286
+
287
+
288
+ # Simplified assuming self.crop_left = self.padding_left = 0
289
+ def original_to_transformed_w_coords(original_coords, scale_w):
290
+ return np.round(original_coords * scale_w).astype(np.int32)
291
+
292
+
293
+ def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]:
294
+ x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0]
295
+ y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0]
296
+ return [x_scaled, y_scaled]
297
+
298
+
299
+ def scale_bbox_to_transformed_image(
300
+ top: float, left: float, bottom: float, right: float, scale_factor: float
301
+ ) -> List[int]:
302
+ top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0]
303
+ left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0]
304
+ bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0]
305
+ right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0]
306
+ return [top_scaled, left_scaled, bottom_scaled, right_scaled]
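+ # Worked example (hypothetical values): each coordinate is halved before applying the image scale
+ # factor and rounding, so with scale_factor=1.0, scale_bbox_to_transformed_image(top=100, left=40,
+ # bottom=300, right=240, scale_factor=1.0) returns [50, 20, 150, 120].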
307
+
308
+
309
+ class FuyuProcessor(ProcessorMixin):
310
+ r"""
311
+ Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor.
312
+
313
+ [`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the
314
+ [`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information.
315
+
316
+ Args:
317
+ image_processor ([`FuyuImageProcessor`]):
318
+ The image processor is a required input.
319
+ tokenizer ([`LlamaTokenizerFast`]):
320
+ The tokenizer is a required input.
321
+ """
322
+
323
+ attributes = ["image_processor", "tokenizer"]
324
+ image_processor_class = "FuyuImageProcessor"
325
+ tokenizer_class = "AutoTokenizer"
326
+
327
+ def __init__(self, image_processor, tokenizer):
328
+ super().__init__(image_processor=image_processor, tokenizer=tokenizer)
329
+ self.image_processor = image_processor
330
+ self.tokenizer = tokenizer
331
+ self.max_tokens_to_generate = 10
332
+ self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it?
333
+ self.pad_token_id = 0
334
+ self.dummy_image_index = -1
335
+
336
+ def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool):
337
+ max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs)
338
+ max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs)
339
+
340
+ batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []}
341
+
342
+ for entry in model_inputs:
343
+ for key, tensor in entry.items():
344
+ if key == "input_ids":
345
+ num_padding_tokens = max_length_input_ids - tensor.shape[1]
346
+ padded_input_ids = torch.cat(
347
+ [
348
+ torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long),
349
+ tensor,
350
+ ],
351
+ dim=1,
352
+ )
353
+ batched_inputs[key].append(padded_input_ids)
354
+
355
+ attention_mask = torch.cat(
356
+ [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)],
357
+ dim=1,
358
+ )
359
+ batched_inputs["attention_mask"].append(attention_mask)
360
+
361
+ elif key == "image_patches":
362
+ # For image_patches, we don't pad but just append them to the list.
363
+ batched_inputs[key].append(tensor)
364
+
365
+ else: # for image_patches_indices
366
+ num_padding_indices = max_length_image_patch_indices - tensor.shape[1]
367
+ padded_indices = torch.cat(
368
+ [
369
+ torch.full(
370
+ (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long
371
+ ),
372
+ tensor,
373
+ ],
374
+ dim=1,
375
+ )
376
+ batched_inputs[key].append(padded_indices)
377
+ batched_keys = ["input_ids", "image_patches_indices"]
378
+ if return_attention_mask:
379
+ batched_keys.append("attention_mask")
380
+ for key in batched_keys:
381
+ batched_inputs[key] = torch.cat(batched_inputs[key], dim=0)
382
+
383
+ return batched_inputs
384
+
385
+ def get_sample_encoding(
386
+ self,
387
+ prompts,
388
+ scale_factors,
389
+ image_unpadded_heights,
390
+ image_unpadded_widths,
391
+ image_placeholder_id,
392
+ image_newline_id,
393
+ tensor_batch_images,
394
+ ):
395
+ image_present = torch.ones(1, 1, 1)
396
+ model_image_input = self.image_processor.preprocess_with_tokenizer_info(
397
+ image_input=tensor_batch_images,
398
+ image_present=image_present,
399
+ image_unpadded_h=image_unpadded_heights,
400
+ image_unpadded_w=image_unpadded_widths,
401
+ image_placeholder_id=image_placeholder_id,
402
+ image_newline_id=image_newline_id,
403
+ variable_sized=True,
404
+ )
405
+ # FIXME max_tokens_to_generate is embedded into this processor's call.
406
+ prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch(
407
+ tokenizer=self.tokenizer,
408
+ prompts=prompts,
409
+ scale_factors=scale_factors,
410
+ max_tokens_to_generate=self.max_tokens_to_generate,
411
+ max_position_embeddings=self.max_position_embeddings,
412
+ add_BOS=True,
413
+ add_beginning_of_answer_token=True,
414
+ )
415
+ image_padded_unpacked_tokens = construct_full_unpacked_stream(
416
+ num_real_text_tokens=prompts_length,
417
+ input_stream=prompt_tokens,
418
+ image_tokens=model_image_input["image_input_ids"],
419
+ batch_size=1,
420
+ num_sub_sequences=self.subsequence_length,
421
+ )
422
+ # Construct inputs for image patch indices.
423
+ unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream(
424
+ num_real_text_tokens=prompts_length,
425
+ input_stream=torch.full_like(prompt_tokens, -1),
426
+ image_tokens=model_image_input["image_patch_indices_per_batch"],
427
+ batch_size=1,
428
+ num_sub_sequences=self.subsequence_length,
429
+ )
430
+ max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens)
431
+ max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings)
432
+ tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0]))
433
+
434
+ # Use same packing logic for the image patch indices.
435
+ image_patch_input_indices = full_unpacked_stream_to_tensor(
436
+ all_bi_tokens_to_place=[tokens_to_place],
437
+ full_unpacked_stream=unpacked_image_patch_indices_per_batch,
438
+ fill_value=-1,
439
+ batch_size=1,
440
+ new_seq_len=max_seq_len_batch,
441
+ offset=0,
442
+ )
443
+ image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]])
444
+ batch_encoding = {
445
+ "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0),
446
+ "image_patches": image_patches_tensor,
447
+ "image_patches_indices": image_patch_input_indices,
448
+ }
449
+ return batch_encoding
450
+
451
+ def __call__(
452
+ self,
453
+ text=None,
454
+ images=None,
455
+ add_special_tokens: bool = True,
456
+ return_attention_mask: bool = True,
457
+ padding: Union[bool, str, PaddingStrategy] = False,
458
+ truncation: Union[bool, str, TruncationStrategy] = None,
459
+ max_length: Optional[int] = None,
460
+ stride: int = 0,
461
+ pad_to_multiple_of: Optional[int] = None,
462
+ return_overflowing_tokens: bool = False,
463
+ return_special_tokens_mask: bool = False,
464
+ return_offsets_mapping: bool = False,
465
+ return_token_type_ids: bool = False,
466
+ return_length: bool = False,
467
+ verbose: bool = True,
468
+ return_tensors: Optional[Union[str, TensorType]] = None,
469
+ **kwargs,
470
+ ) -> "FuyuBatchFeature":
471
+ """
472
+ Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
473
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
474
+ encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
475
+ FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
476
+ of the above two methods for more information.
477
+
478
+ Args:
479
+ text (`str`, `List[str]`):
480
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
481
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
482
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
483
+ images (`PIL.Image.Image`, `List[PIL.Image.Image]`):
484
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
485
+ tensor. Both channels-first and channels-last formats are supported.
486
+
487
+ Returns:
488
+ [`FuyuBatchFeature`]: A [`FuyuBatchFeature`] with the following fields:
489
+
490
+ - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`.
491
+ - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`.
492
+ - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model.
493
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when
494
+ `return_attention_mask=True`.
495
+ """
496
+ requires_backends(self, ["torch"])
497
+
498
+ # --- Check input validity ---
499
+ if not return_attention_mask:
500
+ raise ValueError("`return_attention_mask=False` is not supported for this model.")
501
+ if text is None and images is None:
502
+ raise ValueError("You have to specify either text or images. Both cannot be None.")
503
+ if text is not None and images is None:
504
+ logger.warning("You are processing a text with no associated image. Make sure it is intended.")
505
+ self.current_processor = self.tokenizer
506
+ text_encoding = self.tokenizer(
507
+ text=text,
508
+ add_special_tokens=add_special_tokens,
509
+ padding=padding,
510
+ truncation=truncation,
511
+ max_length=max_length,
512
+ stride=stride,
513
+ pad_to_multiple_of=pad_to_multiple_of,
514
+ return_attention_mask=return_attention_mask,
515
+ return_overflowing_tokens=return_overflowing_tokens,
516
+ return_special_tokens_mask=return_special_tokens_mask,
517
+ return_offsets_mapping=return_offsets_mapping,
518
+ return_token_type_ids=return_token_type_ids,
519
+ return_length=return_length,
520
+ verbose=verbose,
521
+ return_tensors=return_tensors,
522
+ **kwargs,
523
+ )
524
+ return text_encoding
525
+
526
+ if text is None and images is not None:
527
+ logger.warning("You are processing an image with no associated text. Make sure it is intended.")
528
+ prompts = [[""]]
529
+ if text is not None and images is not None:
530
+ if isinstance(text, str):
531
+ prompts = [[text]]
532
+ elif isinstance(text, list):
533
+ prompts = [[text_seq] for text_seq in text]
534
+
535
+ # --- Preprocess images using self.image_processor ---
536
+
537
+ # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors
538
+ image_encoding = self.image_processor.preprocess(images, return_tensors="pt")
539
+ batch_images = image_encoding["images"]
540
+ image_unpadded_heights = image_encoding["image_unpadded_heights"]
541
+ image_unpadded_widths = image_encoding["image_unpadded_widths"]
542
+ scale_factors = image_encoding["image_scale_factors"]
543
+ self.subsequence_length = 1 # Each batch contains only one sequence.
544
+ self.batch_size = len(batch_images)
545
+
546
+ # --- Use self.tokenizer to get the ids of special tokens to insert into image ids ---
547
+
548
+ image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1]
549
+ image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1]
550
+ tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1)
551
+
552
+ # --- Use self.image_processor again to obtain the full token ids and batch inputs ---
553
+ all_encodings = []
554
+
555
+ for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip(
556
+ prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images
557
+ ):
558
+ sample_encoding = self.get_sample_encoding(
559
+ prompts=[prompt],
560
+ scale_factors=[scale_factor],
561
+ image_unpadded_heights=torch.tensor([image_unpadded_height]),
562
+ image_unpadded_widths=torch.tensor([image_unpadded_width]),
563
+ image_placeholder_id=image_placeholder_id,
564
+ image_newline_id=image_newline_id,
565
+ tensor_batch_images=tensor_batch_image.unsqueeze(0),
566
+ )
567
+ all_encodings.append(sample_encoding)
568
+ batch_encoding = self._left_pad_inputs_with_attention_mask(
569
+ model_inputs=all_encodings, return_attention_mask=return_attention_mask
570
+ )
571
+ return FuyuBatchFeature(data=batch_encoding)
572
+
573
+ def post_process_box_coordinates(self, outputs, target_sizes=None):
574
+ """
575
+ Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space.
576
+ Coordinates will be returned in "box" format, with the following pattern:
577
+ `<box>top, left, bottom, right</box>`
578
+
579
+ Point coordinates are not supported yet.
580
+
581
+ Args:
582
+ outputs ([`GenerateOutput`]):
583
+ Raw outputs from `generate`.
584
+ target_sizes (`torch.Tensor`, *optional*):
585
+ Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
586
+ the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left
587
+ to None, coordinates will not be rescaled.
588
+
589
+ Returns:
590
+ `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with
591
+ boxed and possibly rescaled coordinates.
592
+ """
593
+
594
+ def scale_factor_to_fit(original_size, target_size=None):
595
+ height, width = original_size
596
+ if target_size is None:
597
+ max_height = self.image_processor.size["height"]
598
+ max_width = self.image_processor.size["width"]
599
+ else:
600
+ max_height, max_width = target_size
601
+ if width <= max_width and height <= max_height:
602
+ return 1.0
603
+ return min(max_height / height, max_width / width)
604
+
605
+ def find_delimiters_pair(tokens, start_token, end_token):
606
+ start_id = self.tokenizer.convert_tokens_to_ids(start_token)
607
+ end_id = self.tokenizer.convert_tokens_to_ids(end_token)
608
+
609
+ starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0]
610
+ ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0]
611
+
612
+ if torch.any(starting_positions) and torch.any(ending_positions):
613
+ return (starting_positions[0], ending_positions[0])
614
+ return (None, None)
615
+
616
+ def tokens_to_boxes(tokens, original_size):
617
+ while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != (
618
+ None,
619
+ None,
620
+ ):
621
+ start, end = pair
622
+ if end != start + 5:
623
+ continue
624
+
625
+ # Retrieve transformed coordinates from tokens
626
+ coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
627
+
628
+ # Scale back to original image size and multiply by 2
629
+ scale = scale_factor_to_fit(original_size)
630
+ top, left, bottom, right = [2 * int(float(c) / scale) for c in coords]
631
+
632
+ # Replace the IDs so they get detokenized right
633
+ replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}"
634
+ replacement = self.tokenizer.tokenize(replacement)[1:]
635
+ replacement = self.tokenizer.convert_tokens_to_ids(replacement)
636
+ replacement = torch.tensor(replacement).to(tokens)
637
+
638
+ tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
639
+ return tokens
640
+
641
+ def tokens_to_points(tokens, original_size):
642
+ while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != (
643
+ None,
644
+ None,
645
+ ):
646
+ start, end = pair
647
+ if end != start + 3:
648
+ continue
649
+
650
+ # Retrieve transformed coordinates from tokens
651
+ coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
652
+
653
+ # Scale back to original image size and multiply by 2
654
+ scale = scale_factor_to_fit(original_size)
655
+ x, y = [2 * int(float(c) / scale) for c in coords]
656
+
657
+ # Replace the IDs so they get detokenized right
658
+ replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}"
659
+ replacement = self.tokenizer.tokenize(replacement)[1:]
660
+ replacement = self.tokenizer.convert_tokens_to_ids(replacement)
661
+ replacement = torch.tensor(replacement).to(tokens)
662
+
663
+ tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
664
+ return tokens
665
+
666
+ if target_sizes is None:
667
+ target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs)
668
+ elif target_sizes.shape[1] != 2:
669
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
670
+
671
+ if len(outputs) != len(target_sizes):
672
+ raise ValueError("Make sure that you pass in as many target sizes as output sequences")
673
+
674
+ results = []
675
+ for seq, size in zip(outputs, target_sizes):
676
+ seq = tokens_to_boxes(seq, size)
677
+ seq = tokens_to_points(seq, size)
678
+ results.append(seq)
679
+
680
+ return results
681
+
682
+ def batch_decode(self, *args, **kwargs):
683
+ """
684
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
685
+ refer to the docstring of this method for more information.
686
+ """
687
+ return self.tokenizer.batch_decode(*args, **kwargs)
688
+
689
+ def decode(self, *args, **kwargs):
690
+ """
691
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
692
+ the docstring of this method for more information.
693
+ """
694
+ return self.tokenizer.decode(*args, **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__init__.py ADDED
@@ -0,0 +1,63 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "processing_nougat": ["NougatProcessor"],
21
+ }
22
+
23
+ try:
24
+ if not is_tokenizers_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["tokenization_nougat_fast"] = ["NougatTokenizerFast"]
30
+
31
+ try:
32
+ if not is_vision_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["image_processing_nougat"] = ["NougatImageProcessor"]
38
+
39
+
40
+ if TYPE_CHECKING:
41
+ from .processing_nougat import NougatProcessor
42
+
43
+ try:
44
+ if not is_tokenizers_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ from .tokenization_nougat_fast import NougatTokenizerFast
50
+
51
+ try:
52
+ if not is_vision_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .image_processing_nougat import NougatImageProcessor
58
+
59
+
60
+ else:
61
+ import sys
62
+
63
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (970 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc ADDED
Binary file (6.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc ADDED
Binary file (19.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc ADDED
Binary file (5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc ADDED
Binary file (18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py ADDED
@@ -0,0 +1,282 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Nougat checkpoints using the original `nougat` library. URL:
16
+ https://github.com/facebookresearch/nougat/tree/main"""
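+ # Example invocation (the output path is an assumption; flags match the argparse setup below):
+ #   python convert_nougat_to_hf.py --model_tag 0.1.0-base \
+ #       --pytorch_dump_folder_path ./nougat-base --push_to_hub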
17
+
18
+ import argparse
19
+
20
+ import torch
21
+ from huggingface_hub import hf_hub_download
22
+ from nougat import NougatModel
23
+ from nougat.dataset.rasterize import rasterize_paper
24
+ from nougat.utils.checkpoint import get_checkpoint
25
+ from PIL import Image
26
+
27
+ from transformers import (
28
+ DonutSwinConfig,
29
+ DonutSwinModel,
30
+ MBartConfig,
31
+ MBartForCausalLM,
32
+ NougatImageProcessor,
33
+ NougatProcessor,
34
+ NougatTokenizerFast,
35
+ VisionEncoderDecoderModel,
36
+ )
37
+
38
+
39
+ def get_configs(model):
40
+ original_config = model.config
41
+
42
+ encoder_config = DonutSwinConfig(
43
+ image_size=original_config.input_size,
44
+ patch_size=4,
45
+ depths=original_config.encoder_layer,
46
+ num_heads=[4, 8, 16, 32],
47
+ window_size=original_config.window_size,
48
+ embed_dim=128,
49
+ )
50
+ decoder_config = MBartConfig(
51
+ is_decoder=True,
52
+ is_encoder_decoder=False,
53
+ add_cross_attention=True,
54
+ decoder_layers=original_config.decoder_layer,
55
+ max_position_embeddings=original_config.max_position_embeddings,
56
+ vocab_size=len(
57
+ model.decoder.tokenizer
58
+ ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json)
59
+ scale_embedding=True,
60
+ add_final_layer_norm=True,
61
+ tie_word_embeddings=False,
62
+ )
63
+
64
+ return encoder_config, decoder_config
65
+
66
+
67
+ # Copied from transformers.models.donut.convert_donut_to_pytorch.rename_key
68
+ def rename_key(name):
69
+ if "encoder.model" in name:
70
+ name = name.replace("encoder.model", "encoder")
71
+ if "decoder.model" in name:
72
+ name = name.replace("decoder.model", "decoder")
73
+ if "patch_embed.proj" in name:
74
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
75
+ if "patch_embed.norm" in name:
76
+ name = name.replace("patch_embed.norm", "embeddings.norm")
77
+ if name.startswith("encoder"):
78
+ if "layers" in name:
79
+ name = "encoder." + name
80
+ if "attn.proj" in name:
81
+ name = name.replace("attn.proj", "attention.output.dense")
82
+ if "attn" in name and "mask" not in name:
83
+ name = name.replace("attn", "attention.self")
84
+ if "norm1" in name:
85
+ name = name.replace("norm1", "layernorm_before")
86
+ if "norm2" in name:
87
+ name = name.replace("norm2", "layernorm_after")
88
+ if "mlp.fc1" in name:
89
+ name = name.replace("mlp.fc1", "intermediate.dense")
90
+ if "mlp.fc2" in name:
91
+ name = name.replace("mlp.fc2", "output.dense")
92
+
93
+ if name == "encoder.norm.weight":
94
+ name = "encoder.layernorm.weight"
95
+ if name == "encoder.norm.bias":
96
+ name = "encoder.layernorm.bias"
97
+
98
+ return name
99
+
100
+
101
+ # Copied from transformers.models.donut.convert_donut_to_pytorch.convert_state_dict
102
+ def convert_state_dict(orig_state_dict, model):
103
+ for key in orig_state_dict.copy().keys():
104
+ val = orig_state_dict.pop(key)
105
+
106
+ if "qkv" in key:
107
+ key_split = key.split(".")
108
+ layer_num = int(key_split[3])
109
+ block_num = int(key_split[5])
110
+ dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
111
+
112
+ if "weight" in key:
113
+ orig_state_dict[
114
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
115
+ ] = val[:dim, :]
116
+ orig_state_dict[
117
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
118
+ ] = val[dim : dim * 2, :]
119
+ orig_state_dict[
120
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
121
+ ] = val[-dim:, :]
122
+ else:
123
+ orig_state_dict[
124
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
125
+ ] = val[:dim]
126
+ orig_state_dict[
127
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
128
+ ] = val[dim : dim * 2]
129
+ orig_state_dict[
130
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
131
+ ] = val[-dim:]
132
+ elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
133
+ # HuggingFace implementation doesn't use attn_mask buffer
134
+ # and model doesn't use final LayerNorms for the encoder
135
+ pass
136
+ else:
137
+ orig_state_dict[rename_key(key)] = val
138
+
139
+ return orig_state_dict
140
+
141
+
142
+ def convert_nougat_checkpoint(model_tag, pytorch_dump_folder_path=None, push_to_hub=False):
143
+ # load original model
144
+ checkpoint_path = get_checkpoint(None, model_tag)
145
+ original_model = NougatModel.from_pretrained(checkpoint_path)
146
+ original_model.eval()
147
+
148
+ # load HuggingFace model
149
+ encoder_config, decoder_config = get_configs(original_model)
150
+ encoder = DonutSwinModel(encoder_config)
151
+ decoder = MBartForCausalLM(decoder_config)
152
+ model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
153
+ model.eval()
154
+
155
+ state_dict = original_model.state_dict()
156
+ new_state_dict = convert_state_dict(state_dict, model)
157
+ model.load_state_dict(new_state_dict)
158
+
159
+ # verify results on PDF
160
+ filepath = hf_hub_download(repo_id="ysharma/nougat", filename="input/nougat.pdf", repo_type="space")
161
+ images = rasterize_paper(pdf=filepath, return_pil=True)
162
+ image = Image.open(images[0])
163
+
164
+ tokenizer_file = checkpoint_path / "tokenizer.json"
165
+ tokenizer = NougatTokenizerFast(tokenizer_file=str(tokenizer_file))
166
+ tokenizer.pad_token = "<pad>"
167
+ tokenizer.bos_token = "<s>"
168
+ tokenizer.eos_token = "</s>"
169
+ tokenizer.unk_token = "<unk>"
170
+ tokenizer.model_max_length = original_model.config.max_length
171
+
172
+ size = {"height": original_model.config.input_size[0], "width": original_model.config.input_size[1]}
173
+ image_processor = NougatImageProcessor(
174
+ do_align_long_axis=original_model.config.align_long_axis,
175
+ size=size,
176
+ )
177
+ processor = NougatProcessor(image_processor=image_processor, tokenizer=tokenizer)
178
+
179
+ # verify pixel_values
180
+ pixel_values = processor(image, return_tensors="pt").pixel_values
181
+ original_pixel_values = original_model.encoder.prepare_input(image).unsqueeze(0)
182
+
183
+ assert torch.allclose(original_pixel_values, pixel_values)
184
+
185
+ # verify patch embeddings
186
+ original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
187
+ patch_embeddings, _ = model.encoder.embeddings(pixel_values)
188
+ assert torch.allclose(original_patch_embed, patch_embeddings)
189
+
190
+ # verify encoder hidden states
191
+ original_last_hidden_state = original_model.encoder(pixel_values)
192
+ last_hidden_state = model.encoder(pixel_values).last_hidden_state
193
+ assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
194
+
195
+ # NOTE original model does not use tied weights for embeddings of decoder
196
+ original_embeddings = original_model.decoder.model.model.decoder.embed_tokens
197
+ embeddings = model.decoder.model.decoder.embed_tokens
198
+ assert torch.allclose(original_embeddings.weight, embeddings.weight, atol=1e-3)
199
+
200
+ # verify decoder hidden states
201
+ prompt = "hello world"
202
+ decoder_input_ids = original_model.decoder.tokenizer(
203
+ prompt, add_special_tokens=False, return_tensors="pt"
204
+ ).input_ids
205
+ decoder_attention_mask = torch.ones_like(decoder_input_ids)
206
+ original_logits = original_model(
207
+ image_tensors=pixel_values, decoder_input_ids=decoder_input_ids, attention_mask=decoder_attention_mask
208
+ ).logits
209
+ logits = model(
210
+ pixel_values,
211
+ decoder_input_ids=decoder_input_ids[:, :-1],
212
+ decoder_attention_mask=decoder_attention_mask[:, :-1],
213
+ ).logits
214
+ assert torch.allclose(original_logits, logits, atol=1e-3)
215
+
216
+ # verify generation
217
+ outputs = model.generate(
218
+ pixel_values,
219
+ min_length=1,
220
+ max_length=30,
221
+ pad_token_id=tokenizer.pad_token_id,
222
+ eos_token_id=tokenizer.eos_token_id,
223
+ use_cache=True,
224
+ bad_words_ids=[
225
+ [tokenizer.unk_token_id],
226
+ ],
227
+ return_dict_in_generate=True,
228
+ do_sample=False,
229
+ )
230
+ generated = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0]
231
+
232
+ if model_tag == "0.1.0-base":
233
+ expected_generation = "# Nougat: Neural Optical Understanding for Academic Documents\n\nLukas Blecher\n\nCorrespondence to: lblec"
234
+ elif model_tag == "0.1.0-small":
235
+ expected_generation = (
236
+ "# Nougat: Neural Optical Understanding for Academic Documents\n\nLukas Blecher\n\nCorrespondence to: lble"
237
+ )
238
+ else:
239
+ raise ValueError(f"Unexpected model tag: {model_tag}")
240
+
241
+ assert generated == expected_generation
242
+ print("Looks ok!")
243
+
244
+ if pytorch_dump_folder_path is not None:
245
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
246
+ model.save_pretrained(pytorch_dump_folder_path)
247
+ processor.save_pretrained(pytorch_dump_folder_path)
248
+
249
+ if push_to_hub:
250
+ tag_to_name = {"0.1.0-base": "nougat-base", "0.1.0-small": "nougat-small"}
251
+ model_name = tag_to_name[model_tag]
252
+
253
+ model.push_to_hub(f"facebook/{model_name}")
254
+ processor.push_to_hub(f"facebook/{model_name}")
255
+
256
+
257
+ if __name__ == "__main__":
258
+ parser = argparse.ArgumentParser()
259
+ # Required parameters
260
+ parser.add_argument(
261
+ "--model_tag",
262
+ default="0.1.0-base",
263
+ required=False,
264
+ type=str,
265
+ choices=["0.1.0-base", "0.1.0-small"],
266
+ help="Tag of the original model you'd like to convert.",
267
+ )
268
+ parser.add_argument(
269
+ "--pytorch_dump_folder_path",
270
+ default=None,
271
+ required=False,
272
+ type=str,
273
+ help="Path to the output PyTorch model directory.",
274
+ )
275
+ parser.add_argument(
276
+ "--push_to_hub",
277
+ action="store_true",
278
+ help="Whether or not to push the converted model and processor to the 🤗 hub.",
279
+ )
280
+
281
+ args = parser.parse_args()
282
+ convert_nougat_checkpoint(args.model_tag, args.pytorch_dump_folder_path, args.push_to_hub)
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py ADDED
@@ -0,0 +1,532 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Nougat."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ pad,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ to_pil_image,
28
+ )
29
+ from ...image_utils import (
30
+ IMAGENET_DEFAULT_MEAN,
31
+ IMAGENET_DEFAULT_STD,
32
+ ChannelDimension,
33
+ ImageInput,
34
+ PILImageResampling,
35
+ get_image_size,
36
+ infer_channel_dimension_format,
37
+ is_scaled_image,
38
+ make_list_of_images,
39
+ to_numpy_array,
40
+ valid_images,
41
+ validate_kwargs,
42
+ validate_preprocess_arguments,
43
+ )
44
+ from ...utils import TensorType, logging
45
+ from ...utils.import_utils import is_cv2_available, is_vision_available
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ if is_cv2_available():
52
+ pass
53
+
54
+
55
+ if is_vision_available():
56
+ import PIL
57
+
58
+
59
+ class NougatImageProcessor(BaseImageProcessor):
60
+ r"""
61
+ Constructs a Nougat image processor.
62
+
63
+ Args:
64
+ do_crop_margin (`bool`, *optional*, defaults to `True`):
65
+ Whether to crop the image margins.
66
+ do_resize (`bool`, *optional*, defaults to `True`):
67
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
68
+ `do_resize` in the `preprocess` method.
69
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 896, "width": 672}`):
70
+ Size of the image after resizing. Can be overridden by `size` in the `preprocess` method.
71
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
72
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
73
+ do_thumbnail (`bool`, *optional*, defaults to `True`):
74
+ Whether to resize the image using thumbnail method.
75
+ do_align_long_axis (`bool`, *optional*, defaults to `False`):
76
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
77
+ do_pad (`bool`, *optional*, defaults to `True`):
78
+ Whether to pad the images to the largest image size in the batch.
79
+ do_rescale (`bool`, *optional*, defaults to `True`):
80
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
81
+ parameter in the `preprocess` method.
82
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
83
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
84
+ `preprocess` method.
85
+ do_normalize (`bool`, *optional*, defaults to `True`):
86
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
87
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
88
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
89
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
90
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
91
+ Image standard deviation.
92
+ """
93
+
94
+ model_input_names = ["pixel_values"]
95
+
96
+ def __init__(
97
+ self,
98
+ do_crop_margin: bool = True,
99
+ do_resize: bool = True,
100
+ size: Dict[str, int] = None,
101
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
102
+ do_thumbnail: bool = True,
103
+ do_align_long_axis: bool = False,
104
+ do_pad: bool = True,
105
+ do_rescale: bool = True,
106
+ rescale_factor: Union[int, float] = 1 / 255,
107
+ do_normalize: bool = True,
108
+ image_mean: Optional[Union[float, List[float]]] = None,
109
+ image_std: Optional[Union[float, List[float]]] = None,
110
+ **kwargs,
111
+ ) -> None:
112
+ super().__init__(**kwargs)
113
+
114
+ size = size if size is not None else {"height": 896, "width": 672}
115
+ size = get_size_dict(size)
116
+
117
+ self.do_crop_margin = do_crop_margin
118
+ self.do_resize = do_resize
119
+ self.size = size
120
+ self.resample = resample
121
+ self.do_thumbnail = do_thumbnail
122
+ self.do_align_long_axis = do_align_long_axis
123
+ self.do_pad = do_pad
124
+ self.do_rescale = do_rescale
125
+ self.rescale_factor = rescale_factor
126
+ self.do_normalize = do_normalize
127
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
128
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
129
+ self._valid_processor_keys = [
130
+ "images",
131
+ "do_crop_margin",
132
+ "do_resize",
133
+ "size",
134
+ "resample",
135
+ "do_thumbnail",
136
+ "do_align_long_axis",
137
+ "do_pad",
138
+ "do_rescale",
139
+ "rescale_factor",
140
+ "do_normalize",
141
+ "image_mean",
142
+ "image_std",
143
+ "return_tensors",
144
+ "data_format",
145
+ "input_data_format",
146
+ ]
147
+
148
+ def python_find_non_zero(self, image: np.array):
149
+ """This is a reimplementation of a findNonZero function equivalent to cv2."""
150
+ non_zero_indices = np.column_stack(np.nonzero(image))
151
+ idxvec = non_zero_indices[:, [1, 0]]
152
+ idxvec = idxvec.reshape(-1, 1, 2)
153
+ return idxvec
154
+
155
+ def python_bounding_rect(self, coordinates):
156
+ """This is a reimplementation of a BoundingRect function equivalent to cv2."""
157
+ min_values = np.min(coordinates, axis=(0, 1)).astype(int)
158
+ max_values = np.max(coordinates, axis=(0, 1)).astype(int)
159
+ x_min, y_min = min_values[0], min_values[1]
160
+ width = max_values[0] - x_min + 1
161
+ height = max_values[1] - y_min + 1
162
+ return x_min, y_min, width, height
163
+
164
+ def crop_margin(
165
+ self,
166
+ image: np.array,
167
+ gray_threshold: int = 200,
168
+ data_format: Optional[ChannelDimension] = None,
169
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
170
+ ) -> np.array:
171
+ """
172
+ Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the
173
+ threshold).
174
+
175
+ Args:
176
+ image (`np.array`):
177
+ The image to be cropped.
178
+ gray_threshold (`int`, *optional*, defaults to `200`)
179
+ Value below which pixels are considered to be gray.
180
+ data_format (`ChannelDimension`, *optional*):
181
+ The channel dimension format of the output image. If unset, will use the inferred format from the
182
+ input.
183
+ input_data_format (`ChannelDimension`, *optional*):
184
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
185
+ """
186
+ if input_data_format is None:
187
+ input_data_format = infer_channel_dimension_format(image)
188
+
189
+ image = to_pil_image(image, input_data_format=input_data_format)
190
+ data = np.array(image.convert("L")).astype(np.uint8)
191
+ max_val = data.max()
192
+ min_val = data.min()
193
+ if max_val == min_val:
194
+ image = np.array(image)
195
+ image = (
196
+ to_channel_dimension_format(image, data_format, input_data_format)
197
+ if data_format is not None
198
+ else image
199
+ )
200
+ return image
201
+ data = (data - min_val) / (max_val - min_val) * 255
202
+ gray = data < gray_threshold
203
+ coords = self.python_find_non_zero(gray)
204
+ x_min, y_min, width, height = self.python_bounding_rect(coords)
205
+ image = image.crop((x_min, y_min, x_min + width, y_min + height))
206
+ image = np.array(image).astype(np.uint8)
207
+ image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)
208
+
209
+ image = (
210
+ to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
211
+ )
212
+
213
+ return image
214
+
215
+ # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.align_long_axis
216
+ def align_long_axis(
217
+ self,
218
+ image: np.ndarray,
219
+ size: Dict[str, int],
220
+ data_format: Optional[Union[str, ChannelDimension]] = None,
221
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
222
+ ) -> np.ndarray:
223
+ """
224
+ Align the long axis of the image to the longest axis of the specified size.
225
+
226
+ Args:
227
+ image (`np.ndarray`):
228
+ The image to be aligned.
229
+ size (`Dict[str, int]`):
230
+ The size `{"height": h, "width": w}` to align the long axis to.
231
+ data_format (`str` or `ChannelDimension`, *optional*):
232
+ The data format of the output image. If unset, the same format as the input image is used.
233
+ input_data_format (`ChannelDimension` or `str`, *optional*):
234
+ The channel dimension format of the input image. If not provided, it will be inferred.
235
+
236
+ Returns:
237
+ `np.ndarray`: The aligned image.
238
+ """
239
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
240
+ output_height, output_width = size["height"], size["width"]
241
+
242
+ if (output_width < output_height and input_width > input_height) or (
243
+ output_width > output_height and input_width < input_height
244
+ ):
245
+ image = np.rot90(image, 3)
246
+
247
+ if data_format is not None:
248
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
249
+
250
+ return image
251
+
252
+ def pad_image(
253
+ self,
254
+ image: np.ndarray,
255
+ size: Dict[str, int],
256
+ data_format: Optional[Union[str, ChannelDimension]] = None,
257
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
258
+ ) -> np.ndarray:
259
+ """
260
+ Pad the image to the specified size at the top, bottom, left and right.
261
+
262
+ Args:
263
+ image (`np.ndarray`):
264
+ The image to be padded.
265
+ size (`Dict[str, int]`):
266
+ The size `{"height": h, "width": w}` to pad the image to.
267
+ data_format (`str` or `ChannelDimension`, *optional*):
268
+ The data format of the output image. If unset, the same format as the input image is used.
269
+ input_data_format (`ChannelDimension` or `str`, *optional*):
270
+ The channel dimension format of the input image. If not provided, it will be inferred.
271
+ """
272
+ output_height, output_width = size["height"], size["width"]
273
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
274
+
275
+ delta_width = output_width - input_width
276
+ delta_height = output_height - input_height
277
+
278
+ pad_top = delta_height // 2
279
+ pad_left = delta_width // 2
280
+
281
+ pad_bottom = delta_height - pad_top
282
+ pad_right = delta_width - pad_left
283
+
284
+ padding = ((pad_top, pad_bottom), (pad_left, pad_right))
285
+ return pad(image, padding, data_format=data_format, input_data_format=input_data_format)
286
+
287
+ # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.thumbnail
288
+ def thumbnail(
289
+ self,
290
+ image: np.ndarray,
291
+ size: Dict[str, int],
292
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
293
+ data_format: Optional[Union[str, ChannelDimension]] = None,
294
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
295
+ **kwargs,
296
+ ) -> np.ndarray:
297
+ """
298
+ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
299
+ corresponding dimension of the specified size.
300
+
301
+ Args:
302
+ image (`np.ndarray`):
303
+ The image to be resized.
304
+ size (`Dict[str, int]`):
305
+ The size `{"height": h, "width": w}` to resize the image to.
306
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
307
+ The resampling filter to use.
308
+ data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
309
+ The data format of the output image. If unset, the same format as the input image is used.
310
+ input_data_format (`ChannelDimension` or `str`, *optional*):
311
+ The channel dimension format of the input image. If not provided, it will be inferred.
312
+ """
313
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
314
+ output_height, output_width = size["height"], size["width"]
315
+
316
+ # We always resize to the smallest of either the input or output size.
317
+ height = min(input_height, output_height)
318
+ width = min(input_width, output_width)
319
+
320
+ if height == input_height and width == input_width:
321
+ return image
322
+
323
+ if input_height > input_width:
324
+ width = int(input_width * height / input_height)
325
+ elif input_width > input_height:
326
+ height = int(input_height * width / input_width)
327
+
328
+ return resize(
329
+ image,
330
+ size=(height, width),
331
+ resample=resample,
332
+ reducing_gap=2.0,
333
+ data_format=data_format,
334
+ input_data_format=input_data_format,
335
+ **kwargs,
336
+ )
337
+
338
+ # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.resize
339
+ def resize(
340
+ self,
341
+ image: np.ndarray,
342
+ size: Dict[str, int],
343
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
344
+ data_format: Optional[Union[str, ChannelDimension]] = None,
345
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
346
+ **kwargs,
347
+ ) -> np.ndarray:
348
+ """
349
+ Resizes `image` to `(height, width)` specified by `size` using the PIL library.
350
+
351
+ Args:
352
+ image (`np.ndarray`):
353
+ Image to resize.
354
+ size (`Dict[str, int]`):
355
+ Size of the output image.
356
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
357
+ Resampling filter to use when resiizing the image.
358
+ data_format (`str` or `ChannelDimension`, *optional*):
359
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
360
+ input_data_format (`ChannelDimension` or `str`, *optional*):
361
+ The channel dimension format of the input image. If not provided, it will be inferred.
362
+ """
363
+ size = get_size_dict(size)
364
+ shortest_edge = min(size["height"], size["width"])
365
+ output_size = get_resize_output_image_size(
366
+ image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
367
+ )
368
+ resized_image = resize(
369
+ image,
370
+ size=output_size,
371
+ resample=resample,
372
+ data_format=data_format,
373
+ input_data_format=input_data_format,
374
+ **kwargs,
375
+ )
376
+ return resized_image
377
+
378
+ def preprocess(
379
+ self,
380
+ images: ImageInput,
381
+ do_crop_margin: bool = None,
382
+ do_resize: bool = None,
383
+ size: Dict[str, int] = None,
384
+ resample: PILImageResampling = None,
385
+ do_thumbnail: bool = None,
386
+ do_align_long_axis: bool = None,
387
+ do_pad: bool = None,
388
+ do_rescale: bool = None,
389
+ rescale_factor: Union[int, float] = None,
390
+ do_normalize: bool = None,
391
+ image_mean: Optional[Union[float, List[float]]] = None,
392
+ image_std: Optional[Union[float, List[float]]] = None,
393
+ return_tensors: Optional[Union[str, TensorType]] = None,
394
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
395
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
396
+ **kwargs,
397
+ ) -> PIL.Image.Image:
398
+ """
399
+ Preprocess an image or batch of images.
400
+
401
+ Args:
402
+ images (`ImageInput`):
403
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
404
+ do_crop_margin (`bool`, *optional*, defaults to `self.do_crop_margin`):
405
+ Whether to crop the image margins.
406
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
407
+ Whether to resize the image.
408
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
409
+ Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
410
+ size["width"]) with the longest edge resized to keep the input aspect ratio.
411
+ resample (`int`, *optional*, defaults to `self.resample`):
412
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
413
+ has an effect if `do_resize` is set to `True`.
414
+ do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
415
+ Whether to resize the image using thumbnail method.
416
+ do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
417
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
418
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
419
+ Whether to pad the images to the largest image size in the batch.
420
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
421
+ Whether to rescale the image by the specified scale `rescale_factor`.
422
+ rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
423
+ Scale factor to use if rescaling the image.
424
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
425
+ Whether to normalize the image.
426
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
427
+ Image mean to use for normalization.
428
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
429
+ Image standard deviation to use for normalization.
430
+ return_tensors (`str` or `TensorType`, *optional*):
431
+ The type of tensors to return. Can be one of:
432
+ - Unset: Return a list of `np.ndarray`.
433
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
434
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
435
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
436
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
437
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
438
+ The channel dimension format for the output image. Can be one of:
439
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
440
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
441
+ - Unset: defaults to the channel dimension format of the input image.
442
+ input_data_format (`ChannelDimension` or `str`, *optional*):
443
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
444
+ from the input image. Can be one of:
445
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
446
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
447
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
448
+ """
449
+ do_crop_margin = do_crop_margin if do_crop_margin is not None else self.do_crop_margin
450
+ do_resize = do_resize if do_resize is not None else self.do_resize
451
+ size = size if size is not None else self.size
452
+ resample = resample if resample is not None else self.resample
453
+ do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
454
+ do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
455
+ do_pad = do_pad if do_pad is not None else self.do_pad
456
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
457
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
458
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
459
+ image_mean = image_mean if image_mean is not None else self.image_mean
460
+ image_std = image_std if image_std is not None else self.image_std
461
+
462
+ images = make_list_of_images(images)
463
+
464
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
465
+
466
+ if not valid_images(images):
467
+ raise ValueError(
468
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
469
+ "torch.Tensor, tf.Tensor or jax.ndarray."
470
+ )
471
+ validate_preprocess_arguments(
472
+ do_rescale=do_rescale,
473
+ rescale_factor=rescale_factor,
474
+ do_normalize=do_normalize,
475
+ image_mean=image_mean,
476
+ image_std=image_std,
477
+ do_pad=do_pad,
478
+ size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
479
+ do_resize=do_resize,
480
+ size=size,
481
+ resample=resample,
482
+ )
483
+
484
+ # All transformations expect numpy arrays.
485
+ images = [to_numpy_array(image) for image in images]
486
+
487
+ if is_scaled_image(images[0]) and do_rescale:
488
+ logger.warning_once(
489
+ "It looks like you are trying to rescale already rescaled images. If the input"
490
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
491
+ )
492
+
493
+ if input_data_format is None:
494
+ # We assume that all images have the same channel dimension format.
495
+ input_data_format = infer_channel_dimension_format(images[0])
496
+
497
+ if do_crop_margin:
498
+ images = [self.crop_margin(image, input_data_format=input_data_format) for image in images]
499
+
500
+ if do_align_long_axis:
501
+ images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]
502
+
503
+ if do_resize:
504
+ images = [
505
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
506
+ for image in images
507
+ ]
508
+
509
+ if do_thumbnail:
510
+ images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]
511
+
512
+ if do_pad:
513
+ images = [self.pad_image(image=image, size=size, input_data_format=input_data_format) for image in images]
514
+
515
+ if do_rescale:
516
+ images = [
517
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
518
+ for image in images
519
+ ]
520
+
521
+ if do_normalize:
522
+ images = [
523
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
524
+ for image in images
525
+ ]
526
+
527
+ images = [
528
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
529
+ ]
530
+
531
+ data = {"pixel_values": images}
532
+ return BatchFeature(data=data, tensor_type=return_tensors)
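The `preprocess` body above applies the Nougat-specific steps in a fixed order: crop margin, align long axis, resize, thumbnail, pad, then rescale and normalize, and finally converts to the requested channel format. A minimal sketch of driving this pipeline through the image processor; the checkpoint name and image path are illustrative, and Pillow plus the vision extra are assumed to be installed:

```python
from PIL import Image
from transformers import NougatImageProcessor

# Illustrative checkpoint and file name, not taken from this diff.
processor = NougatImageProcessor.from_pretrained("facebook/nougat-base")
image = Image.open("page.png").convert("RGB")

# Arguments left unset fall back to the processor's configured defaults (self.do_*).
batch = processor(image, do_crop_margin=True, return_tensors="pt")
print(batch["pixel_values"].shape)  # (batch_size, num_channels, height, width)
```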
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py ADDED
@@ -0,0 +1,160 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Nougat.
17
+ """
18
+
19
+ from typing import Dict, List, Optional, Union
20
+
21
+ from transformers.tokenization_utils_base import PreTokenizedInput, TextInput, TruncationStrategy
22
+
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...utils import PaddingStrategy, TensorType
25
+
26
+
27
+ class NougatProcessor(ProcessorMixin):
28
+ r"""
29
+ Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor.
30
+
31
+ [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the
32
+ [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor ([`NougatImageProcessor`]):
36
+ An instance of [`NougatImageProcessor`]. The image processor is a required input.
37
+ tokenizer ([`NougatTokenizerFast`]):
38
+ An instance of [`NougatTokenizerFast`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "AutoImageProcessor"
43
+ tokenizer_class = "AutoTokenizer"
44
+
45
+ def __init__(self, image_processor, tokenizer):
46
+ super().__init__(image_processor, tokenizer)
47
+ self.current_processor = self.image_processor
48
+
49
+ def __call__(
50
+ self,
51
+ images=None,
52
+ text=None,
53
+ do_crop_margin: bool = None,
54
+ do_resize: bool = None,
55
+ size: Dict[str, int] = None,
56
+ resample: "PILImageResampling" = None, # noqa: F821
57
+ do_thumbnail: bool = None,
58
+ do_align_long_axis: bool = None,
59
+ do_pad: bool = None,
60
+ do_rescale: bool = None,
61
+ rescale_factor: Union[int, float] = None,
62
+ do_normalize: bool = None,
63
+ image_mean: Optional[Union[float, List[float]]] = None,
64
+ image_std: Optional[Union[float, List[float]]] = None,
65
+ data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821
66
+ input_data_format: Optional[Union[str, "ChannelDimension"]] = None, # noqa: F821
67
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
68
+ text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
69
+ text_pair_target: Optional[
70
+ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
71
+ ] = None,
72
+ add_special_tokens: bool = True,
73
+ padding: Union[bool, str, PaddingStrategy] = False,
74
+ truncation: Union[bool, str, TruncationStrategy] = None,
75
+ max_length: Optional[int] = None,
76
+ stride: int = 0,
77
+ is_split_into_words: bool = False,
78
+ pad_to_multiple_of: Optional[int] = None,
79
+ return_tensors: Optional[Union[str, TensorType]] = None,
80
+ return_token_type_ids: Optional[bool] = None,
81
+ return_attention_mask: Optional[bool] = None,
82
+ return_overflowing_tokens: bool = False,
83
+ return_special_tokens_mask: bool = False,
84
+ return_offsets_mapping: bool = False,
85
+ return_length: bool = False,
86
+ verbose: bool = True,
87
+ ):
88
+ if images is None and text is None:
89
+ raise ValueError("You need to specify either an `images` or `text` input to process.")
90
+
91
+ if images is not None:
92
+ inputs = self.image_processor(
93
+ images,
94
+ do_crop_margin=do_crop_margin,
95
+ do_resize=do_resize,
96
+ size=size,
97
+ resample=resample,
98
+ do_thumbnail=do_thumbnail,
99
+ do_align_long_axis=do_align_long_axis,
100
+ do_pad=do_pad,
101
+ do_rescale=do_rescale,
102
+ rescale_factor=rescale_factor,
103
+ do_normalize=do_normalize,
104
+ image_mean=image_mean,
105
+ image_std=image_std,
106
+ return_tensors=return_tensors,
107
+ data_format=data_format,
108
+ input_data_format=input_data_format,
109
+ )
110
+ if text is not None:
111
+ encodings = self.tokenizer(
112
+ text,
113
+ text_pair=text_pair,
114
+ text_target=text_target,
115
+ text_pair_target=text_pair_target,
116
+ add_special_tokens=add_special_tokens,
117
+ padding=padding,
118
+ truncation=truncation,
119
+ max_length=max_length,
120
+ stride=stride,
121
+ is_split_into_words=is_split_into_words,
122
+ pad_to_multiple_of=pad_to_multiple_of,
123
+ return_tensors=return_tensors,
124
+ return_token_type_ids=return_token_type_ids,
125
+ return_attention_mask=return_attention_mask,
126
+ return_overflowing_tokens=return_overflowing_tokens,
127
+ return_special_tokens_mask=return_special_tokens_mask,
128
+ return_offsets_mapping=return_offsets_mapping,
129
+ return_length=return_length,
130
+ verbose=verbose,
131
+ )
132
+
133
+ if text is None:
134
+ return inputs
135
+ elif images is None:
136
+ return encodings
137
+ else:
138
+ inputs["labels"] = encodings["input_ids"]
139
+ return inputs
140
+
141
+ def batch_decode(self, *args, **kwargs):
142
+ """
143
+ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
144
+ to the docstring of this method for more information.
145
+ """
146
+ return self.tokenizer.batch_decode(*args, **kwargs)
147
+
148
+ def decode(self, *args, **kwargs):
149
+ """
150
+ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
151
+ the docstring of this method for more information.
152
+ """
153
+ return self.tokenizer.decode(*args, **kwargs)
154
+
155
+ def post_process_generation(self, *args, **kwargs):
156
+ """
157
+ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`].
158
+ Please refer to the docstring of this method for more information.
159
+ """
160
+ return self.tokenizer.post_process_generation(*args, **kwargs)
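Taken together, `__call__` above returns the image features alone, the text encoding alone, or, when both are given, the pixel values with the token ids attached as `labels`. A short sketch of the combined path, again with illustrative checkpoint and file names:

```python
from PIL import Image
from transformers import NougatProcessor

processor = NougatProcessor.from_pretrained("facebook/nougat-base")  # illustrative checkpoint
image = Image.open("page.png").convert("RGB")

# Both inputs given: pixel_values come from the image processor and the
# tokenized target markdown is attached as "labels".
inputs = processor(images=image, text="# Section\n\nGround-truth markdown.", return_tensors="pt")
print(sorted(inputs.keys()))  # ['labels', 'pixel_values']
```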
llmeval-env/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py ADDED
@@ -0,0 +1,625 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Fast tokenizer class for Nougat.
17
+ """
18
+ import re
19
+ from functools import partial
20
+ from multiprocessing import Pool
21
+ from typing import List, Union
22
+
23
+ import numpy as np
24
+
25
+ from transformers.tokenization_utils_base import INIT_TOKENIZER_DOCSTRING
26
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
27
+ from transformers.utils import add_end_docstrings
28
+
29
+ from ...utils import is_levenshtein_available, is_nltk_available, logging, requires_backends
30
+
31
+
32
+ if is_levenshtein_available():
33
+ from Levenshtein import ratio
34
+
35
+ if is_nltk_available():
36
+ import nltk
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ INIT_TOKENIZER_DOCSTRING += """
43
+ tokenizer_object ([`tokenizers.Tokenizer`]):
44
+ A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗
45
+ tokenizers](../fast_tokenizers) for more information.
46
+ tokenizer_file ([`str`]):
47
+ A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗
48
+ tokenizers.
49
+ """
50
+
51
+
52
+ VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
53
+
54
+
55
+ def markdown_compatible(text: str) -> str:
56
+ """
57
+ Make text compatible with Markdown formatting.
58
+
59
+ This function makes various text formatting adjustments to make it compatible with Markdown.
60
+
61
+ Args:
62
+ text (`str`):
63
+ The input text to be made Markdown-compatible.
64
+
65
+ Returns:
66
+ `str`: The Markdown-compatible text.
67
+ """
68
+ # equation tag
69
+ # Replace lines that start with a pattern like (decimal) \[some text\] with \[some text \tag{decimal}\].
70
+ text = re.sub(r"^\(([\d.]+[a-zA-Z]?)\) \\\[(.+?)\\\]$", r"\[\2 \\tag{\1}\]", text, flags=re.M)
71
+ # Replace lines that start with a pattern like \[some text\] (decimal) with \[some text \tag{decimal}\].
72
+ text = re.sub(r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\)$", r"\[\1 \\tag{\2}\]", text, flags=re.M)
73
+ # Replace lines that start with a pattern like \[some text\] (digits) \[another text\] with \[some text \tag{digits}\] \[another text\].
74
+ text = re.sub(
75
+ r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\) (\\\[.+?\\\])$",
76
+ r"\[\1 \\tag{\2}\] \3",
77
+ text,
78
+ flags=re.M,
79
+ )
80
+ # multi line
81
+ text = text.replace(r"\. ", ". ")
82
+ # bold formatting
83
+ text = text.replace(r"\bm{", r"\mathbf{").replace(r"{\\bm ", r"\mathbf{")
84
+ text = re.sub(r"\\mbox{ ?\\boldmath\$(.*?)\$}", r"\\mathbf{\1}", text)
85
+ # Reformat urls (http, ftp and https only) to markdown [url](url) clickable format
86
+ text = re.sub(
87
+ r"((?:http|ftp|https):\/\/(?:[\w_-]+(?:(?:\.[\w_-]+)+))(?:[\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-]))",
88
+ r"[\1](\1)",
89
+ text,
90
+ )
91
+ # algorithms
92
+ text = re.sub(r"```\s*(.+?)\s*```", r"```\n\1\n```", text, flags=re.S)
93
+
94
+ return text
95
+
96
+
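As a quick illustration of the equation-tag substitutions in `markdown_compatible` above, a sketch that imports the helper directly from the module (this assumes the tokenizers backend is installed, since the module also defines the fast tokenizer):

```python
from transformers.models.nougat.tokenization_nougat_fast import markdown_compatible

# A numbered display equation is rewritten so the number becomes a \tag.
print(markdown_compatible(r"(1.2) \[E=mc^{2}\]"))  # \[E=mc^{2} \tag{1.2}\]
print(markdown_compatible(r"\[E=mc^{2}\] (1.2)"))  # \[E=mc^{2} \tag{1.2}\]
```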
97
+ def normalize_list_like_lines(generation):
98
+ """
99
+ Normalize lines in the given text that resemble list items. The function looks for lines that start optionally with
100
+ '-' or '*', possibly followed by Roman numerals or digits indicating nesting levels. The function reformats such
101
+ lines to make them more structured.
102
+
103
+ Args:
104
+ generation (str): The input text containing lines that need to be normalized.
105
+
106
+ Returns:
107
+ str: The input text with the list-like lines normalized.
108
+
109
+ Note:
110
+ The function uses regular expressions to identify and reformat the list-like lines. The patterns capture
111
+ optional bullet points, nesting levels indicated by numerals, and the actual list item content. The
112
+ normalization adjusts the bullet point style and nesting levels based on the captured patterns.
113
+ """
114
+
115
+ # This matches lines starting with - or *, not followed by - or * (lists)
116
+ # that are then numbered by digits \d or roman numerals (one or more)
117
+ # and then, optional additional numbering of this line is captured
118
+ # this is then fed to re.finditer.
119
+ pattern = r"(?:^)(-|\*)?(?!-|\*) ?((?:\d|[ixv])+ )?.+? (-|\*) (((?:\d|[ixv])+)\.(\d|[ixv]) )?.*(?:$)"
120
+
121
+ for match in reversed(list(re.finditer(pattern, generation, flags=re.I | re.M))):
122
+ start, stop = match.span()
123
+ delim = match.group(3) + " "
124
+ splits = match.group(0).split(delim)
125
+ replacement = ""
126
+
127
+ if match.group(1) is not None:
128
+ splits = splits[1:]
129
+ delim1 = match.group(1) + " "
130
+ else:
131
+ delim1 = ""
132
+ continue # Skip false positives
133
+
134
+ pre, post = generation[:start], generation[stop:]
135
+
136
+ for i, item in enumerate(splits):
137
+ level = 0
138
+ potential_numeral, _, rest = item.strip().partition(" ")
139
+ if not rest:
140
+ continue
141
+ # Infer current nesting level based on detected numbering
142
+ if re.match(r"^[\dixv]+((?:\.[\dixv])?)+$", potential_numeral, flags=re.I | re.M):
143
+ level = potential_numeral.count(".")
144
+
145
+ replacement += (
146
+ ("\n" if i > 0 else "") + ("\t" * level) + (delim if i > 0 or start == 0 else delim1) + item.strip()
147
+ )
148
+
149
+ if post == "":
150
+ post = "\n"
151
+
152
+ generation = pre + replacement + post
153
+
154
+ return generation
155
+
156
+
157
+ def find_next_punctuation(text: str, start_idx=0):
158
+ """
159
+ Find the index of the next punctuation mark.
160
+
161
+ Args:
162
+ text (`str`):
163
+ String to examine
164
+ start_idx (`int`, *optional*):
165
+ Index where to start
166
+ """
167
+
168
+ for i in range(start_idx, len(text)):
169
+ if text[i] in [".", "?", "!", "\n"]:
170
+ return i
171
+
172
+ return None
173
+
174
+
175
+ def truncate_repetitions(text: str, min_len: int = 30) -> str:
176
+ """
177
+ Attempt to truncate repeating segments in the input string.
178
+
179
+ This function looks for the longest repeating substring at the end of the input string and truncates it to appear
180
+ only once. To be considered for removal, repetitions need to be continuous.
181
+
182
+ Args:
183
+ text (`str`):
184
+ The input raw prediction to be truncated.
185
+ min_len (int):
186
+ The minimum length of the repeating segment.
187
+
188
+ Returns:
189
+ `str`: The input string with repeated segments truncated.
190
+ """
191
+ text_lower = text.lower()
192
+ text_length = len(text_lower)
193
+
194
+ if text_length < 2 * min_len:
195
+ return text
196
+
197
+ # try to find a length at which the tail is repeating
198
+ max_repetition_length = None
199
+ for repetition_length in range(min_len, int(text_length / 2)):
200
+ # check if there is a repetition at the end
201
+ same = True
202
+ for i in range(0, repetition_length):
203
+ if text_lower[text_length - repetition_length - i - 1] != text_lower[text_length - i - 1]:
204
+ same = False
205
+ break
206
+
207
+ if same:
208
+ max_repetition_length = repetition_length
209
+
210
+ if max_repetition_length is None:
211
+ return text
212
+
213
+ lcs = text_lower[-max_repetition_length:]
214
+
215
+ # remove all but the last repetition
216
+ substituted_text = text
217
+ substituted_text_lower = text_lower
218
+ while substituted_text_lower.endswith(lcs):
219
+ substituted_text = substituted_text[:-max_repetition_length]
220
+ substituted_text_lower = substituted_text_lower[:-max_repetition_length]
221
+
222
+ # this is the tail with the repetitions
223
+ repeating_tail = text_lower[len(substituted_text_lower) :]
224
+
225
+ # add until next punctuation and make sure last sentence is not repeating
226
+ substituted_text_lower_out = substituted_text_lower
227
+ while True:
228
+ sentence_end = find_next_punctuation(text_lower, len(substituted_text_lower_out))
229
+ sentence_start = find_next_punctuation(text_lower[::-1], len(substituted_text_lower_out))
230
+ if sentence_end and sentence_start:
231
+ sentence = text_lower[sentence_start:sentence_end]
232
+ substituted_text_lower_out = text_lower[: sentence_end + 1]
233
+ if sentence in repeating_tail:
234
+ break
235
+ else:
236
+ break
237
+
238
+ text_out = text[: len(substituted_text_lower_out)]
239
+
240
+ return text_out
241
+
242
+
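`truncate_repetitions` only fires when the tail of the prediction repeats verbatim and the repeating segment is at least `min_len` characters long; a sketch of the intended effect on a degenerate generation, with the helper imported directly:

```python
from transformers.models.nougat.tokenization_nougat_fast import truncate_repetitions

degenerate = "Introduction. " + "The decoder keeps emitting this same sentence. " * 6
cleaned = truncate_repetitions(degenerate, min_len=30)
print(cleaned)                         # repeating tail cut back to one occurrence
print(len(cleaned) < len(degenerate))  # True
```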
243
+ def remove_numbers(lines):
244
+ def _clean(s):
245
+ return re.sub(r"(?:[\d_]|\*\*)", "", s).strip()
246
+
247
+ if isinstance(lines, str):
248
+ return _clean(lines)
249
+ out = []
250
+ for l in lines:
251
+ out.append(_clean(l))
252
+ return out
253
+
254
+
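`remove_numbers` is the normalization applied before the fuzzy matching in `get_slices` below; a small sketch of its effect on a reference-style line and on a list of lines:

```python
from transformers.models.nougat.tokenization_nougat_fast import remove_numbers

# Digits, underscores and ** emphasis markers are stripped before lines are compared.
print(remove_numbers("* [12] A. Author, **2021**. Some_title"))  # * [] A. Author, . Sometitle
print(remove_numbers(["line_1", "line_2"]))                      # ['line', 'line']
```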
255
+ def get_slices(lines, clean_lines):
256
+ """
257
+ Get slices of text based on specific criteria within the lines.
258
+
259
+ This function identifies and returns slices of text from the input lines based on certain conditions.
260
+
261
+ These conditions were chosen by the Nougat authors:
262
+ - The slice is less than 200 characters long.
263
+ - The slice is more than 3 characters long.
264
+ - The slice does not start with "[MISSING_PAGE".
265
+ - The slice is either the same as the next slice or the ratio of the two in terms of Levenshtein distance is
266
+ greater than 0.9.
267
+
268
+ Args:
269
+ lines (`List[str]`):
270
+ The list of lines containing the text.
271
+ clean_lines (`List[str]`):
272
+ A cleaned version of the text (without numbers).
273
+
274
+ Returns:
275
+ `List[tuple]`: A list of tuples representing the start and end indices of text slices.
276
+ """
277
+ indices = np.zeros(len(lines))
278
+ for i in range(len(lines) - 1):
279
+ j = i + 1
280
+ while not clean_lines[j] and j < len(lines) - 1:
281
+ j += 1
282
+ if (
283
+ len(clean_lines[i]) < 200
284
+ and len(clean_lines[i]) > 3
285
+ and len(clean_lines[j]) < 200
286
+ and len(clean_lines[j]) > 3
287
+ and not clean_lines[i].startswith("[MISSING_PAGE")
288
+ and (clean_lines[i] == clean_lines[j] or ratio(clean_lines[i], clean_lines[j]) > 0.9)
289
+ ):
290
+ indices[i:j] = 1
291
+ ids = np.where(indices)[0]
292
+ slices = []
293
+ if len(ids) == 0:
294
+ return slices
295
+ j0 = 0
296
+ for j, x in enumerate(np.diff(ids) > 3):
297
+ if x:
298
+ slices.append((ids[j0], ids[j] + 2))
299
+ j0 = j + 1
300
+ slices.append((ids[j0], ids[-1] + 2))
301
+ return [sli for sli in slices if sli[1] - sli[0] > 15]
302
+
303
+
304
+ def remove_slice_from_lines(lines, clean_text, slice) -> str:
305
+ """
306
+ Remove a slice of text from the lines based on specific criteria.
307
+
308
+ This function identifies a slice of text within the lines and removes it based on certain conditions.
309
+
310
+ Args:
311
+ lines (list of str): The list of lines containing the text.
312
+ clean_text (list of str): A cleaned version of the text (without numbers).
313
+ slice (tuple): A tuple representing the start and end indices of the slice to be removed.
314
+
315
+ Returns:
316
+ str: The removed slice of text as a single string.
317
+ """
318
+ base = clean_text[slice[0]]
319
+ section = list(slice)
320
+ check_start_flag = False
321
+ # backwards pass, at most 5 lines
322
+ for line_idx in range(max(0, slice[0] - 1), max(0, slice[0] - 5), -1):
323
+ if not lines[line_idx]:
324
+ continue
325
+ if lines[line_idx] == "## References":
326
+ section[0] = line_idx
327
+ break
328
+ elif ratio(base, remove_numbers(lines[line_idx])) < 0.9:
329
+ section[0] = line_idx + 1
330
+ potential_ref = remove_numbers(lines[max(0, line_idx - 1)].partition("* [")[-1])
331
+ if len(potential_ref) >= 0.75 * len(base) and ratio(base, potential_ref) < 0.9:
332
+ section[0] = line_idx
333
+ check_start_flag = True
334
+ break
335
+ # forward pass, at most 5 lines
336
+ for line_idx in range(min(len(lines), slice[1]), min(len(lines), slice[1] + 5)):
337
+ if ratio(base, remove_numbers(lines[line_idx])) < 0.9:
338
+ section[1] = line_idx
339
+ break
340
+ if len(lines) <= section[1]:
341
+ section[1] = len(lines) - 1
342
+ to_delete = "\n".join(lines[section[0] : section[1] + 1])
343
+ # cut off next page content
344
+ itera, iterb = enumerate(lines[section[1] - 1]), enumerate(lines[section[1]])
345
+ while True:
346
+ try:
347
+ (ia, a) = next(itera)
348
+ while a.isnumeric():
349
+ (ia, a) = next(itera)
350
+ (ib, b) = next(iterb)
351
+ while b.isnumeric():
352
+ (ib, b) = next(iterb)
353
+ if a != b:
354
+ break
355
+ except StopIteration:
356
+ break
357
+ if check_start_flag and "* [" in to_delete:
358
+ to_delete = "* [" + to_delete.partition("* [")[-1]
359
+ try:
360
+ delta = len(lines[section[1]]) - ib - 1
361
+ if delta > 0:
362
+ to_delete = to_delete[:-delta]
363
+ except UnboundLocalError:
364
+ pass
365
+
366
+ return to_delete.strip()
367
+
368
+
369
+ @add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
370
+ class NougatTokenizerFast(PreTrainedTokenizerFast):
371
+ """
372
+ Fast tokenizer for Nougat (backed by HuggingFace tokenizers library).
373
+
374
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
375
+ refer to this superclass for more information regarding those methods. This class mainly adds Nougat-specific
376
+ methods for postprocessing the generated text.
377
+
378
+ Args:
379
+ vocab_file (`str`, *optional*):
380
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
381
+ contains the vocabulary necessary to instantiate a tokenizer.
382
+ tokenizer_file (`str`, *optional*):
383
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
384
+ contains everything needed to load the tokenizer.
385
+
386
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
387
+ Whether to clean up spaces after decoding; cleanup consists of removing potential artifacts like extra
388
+ spaces.
389
+
390
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
391
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
392
+ token instead.
393
+
394
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
395
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
396
+
397
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
398
+ The end of sequence token.
399
+
400
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
401
+ The token used for padding, for example when batching sequences of different lengths.
402
+ """
403
+
404
+ vocab_files_names = VOCAB_FILES_NAMES
405
+ model_input_names = ["input_ids", "attention_mask"]
406
+ slow_tokenizer_class = None
407
+
408
+ def __init__(
409
+ self,
410
+ vocab_file=None,
411
+ tokenizer_file=None,
412
+ clean_up_tokenization_spaces=False,
413
+ unk_token="<unk>",
414
+ bos_token="<s>",
415
+ eos_token="</s>",
416
+ pad_token="<pad>",
417
+ **kwargs,
418
+ ):
419
+ super().__init__(
420
+ vocab_file=vocab_file,
421
+ tokenizer_file=tokenizer_file,
422
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
423
+ unk_token=unk_token,
424
+ bos_token=bos_token,
425
+ eos_token=eos_token,
426
+ pad_token=pad_token,
427
+ **kwargs,
428
+ )
429
+ self.vocab_file = vocab_file
430
+
431
+ def remove_hallucinated_references(self, text: str) -> str:
432
+ """
433
+ Remove hallucinated or missing references from the text.
434
+
435
+ This function identifies and removes references that are marked as missing or hallucinated from the input text.
436
+
437
+ Args:
438
+ text (`str`):
439
+ The input text containing references.
440
+
441
+ Returns:
442
+ `str`: The text with hallucinated references removed.
443
+ """
444
+ lines = text.split("\n")
445
+ if len(lines) == 0:
446
+ return ""
447
+ clean_lines = remove_numbers(lines)
448
+ slices = get_slices(lines, clean_lines)
449
+ to_delete = []
450
+ for slice in slices:
451
+ to_delete.append(remove_slice_from_lines(lines, clean_lines, slice))
452
+ for to_delete in reversed(to_delete):
453
+ text = text.replace(to_delete, "\n\n[MISSING_PAGE_POST]\n\n")
454
+ text = re.sub(
455
+ r"## References\n+\[MISSING_PAGE_POST(:\d+)?\]",
456
+ "\n\n[MISSING_PAGE_POST\\1]",
457
+ text,
458
+ )
459
+ return text
460
+
461
+ def correct_tables(self, generation: str) -> str:
462
+ """
463
+ Takes a generated string and fixes tables/tabulars to make them match the markdown format needed.
464
+
465
+ Args:
466
+ generation (str): The generated text to be postprocessed.
467
+
468
+ Returns:
469
+ str: The postprocessed text.
470
+
471
+ Example:
472
+
473
+ ```python
474
+ correct_tables("\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}")
475
+ "\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}"
476
+ ```
477
+ """
478
+ # remove obvious wrong tables
479
+ for l in generation.split("\n"):
480
+ if l.count("\\begin{tabular}") > 15 or l.count("\\multicolumn") > 60 or l.count("&") > 400:
481
+ generation = generation.replace(l, "")
482
+ # whitespace corrections
483
+
484
+ generation = generation.replace("\\begin{table} \\begin{tabular}", "\\begin{table}\n\\begin{tabular}")
485
+ generation = generation.replace("\\end{tabular} \\end{table}", "\\end{tabular}\n\\end{table}")
486
+ generation = generation.replace("\\end{table} Tab", "\\end{table}\nTab")
487
+
488
+ generation = re.sub(r"(^.+)\\begin{tab", r"\1\n\\begin{tab", generation, flags=re.M)
489
+
490
+ # Remove left-aligned empty LaTeX tabular blocks.
491
+ generation = generation.replace(r"\begin{tabular}{l l} & \\ \end{tabular}", "")
492
+ # Remove tabulars with just 2 newline characters.
493
+ generation = generation.replace("\\begin{tabular}{}\n\n\\end{tabular}", "")
494
+ return generation
495
+
496
+ def post_process_single(self, generation: str, fix_markdown: bool = True) -> str:
497
+ """
498
+ Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article
499
+ authors. These expressions are commented for clarity and tested end-to-end in most cases.
500
+
501
+ Args:
502
+ generation (str): The generated text to be postprocessed.
503
+ fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True.
504
+
505
+ Returns:
506
+ str: The postprocessed text.
507
+ """
508
+ generation = re.sub(
509
+ r"(?:\n|^)#+ \d*\W? ?(.{100,})", r"\n\1", generation
510
+ ) # overly long section titles are probably not real titles
511
+ generation = generation.strip()
512
+ # Remove LaTeX left margin tag
513
+ generation = generation.replace("\n* [leftmargin=*]\n", "\n")
514
+ # Remove lines with markdown headings starting with #, with numerals,
515
+ # and possibly roman numerals with trailing spaces and newlines
516
+ generation = re.sub(r"^#+ (?:\.?(?:\d|[ixv])+)*\s*(?:$|\n\s*)", "", generation, flags=re.M)
517
+ # most likely hallucinated titles
518
+ lines = generation.split("\n")
519
+ if lines[-1].startswith("#") and lines[-1].lstrip("#").startswith(" ") and len(lines) > 1:
520
+ logger.info("Likely hallucinated title at the end of the page: " + lines[-1])
521
+ generation = "\n".join(lines[:-1])
522
+ # obvious repetition detection
523
+ generation = truncate_repetitions(generation)
524
+ # Reference corrections
525
+ generation = self.remove_hallucinated_references(generation)
526
+ # Remove lines starting with asterisks and numbers like "*[1]" and followed by capital letters and periods (ie too long references)
527
+ generation = re.sub(r"^\* \[\d+\](\s?[A-W]\.+\s?){10,}.*$", "", generation, flags=re.M)
528
+ # Remove empty brackets after a reference number in brackets. *[12][]ABC will become *[12]ABC
529
+ generation = re.sub(r"^(\* \[\d+\])\[\](.*)$", r"\1\2", generation, flags=re.M)
530
+ # Remove single characters before or after 2 new lines
531
+ generation = re.sub(r"(^\w\n\n|\n\n\w$)", "", generation)
532
+ # pmc math artifact correction
533
+ generation = re.sub(
534
+ r"([\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\s.,:()])",
535
+ r"\1\(\2_{\3}\)\4",
536
+ generation,
537
+ )
538
+ generation = re.sub(r"([\s.,\d])_([a-zA-Z0-9])_([\s.,\d;])", r"\1\(\2\)\3", generation)
539
+ # footnote mistakes
540
+ generation = re.sub(
541
+ r"(\nFootnote .*?:) (?:footnotetext|thanks):\W*(.*(?:\n\n|$))",
542
+ r"\1 \2",
543
+ generation,
544
+ )
545
+ # TODO Come up with footnote formatting inside a table
546
+ generation = re.sub(r"\[FOOTNOTE:.+?\](.*?)\[ENDFOOTNOTE\]", "", generation)
547
+ # itemize post processing
548
+ generation = normalize_list_like_lines(generation)
549
+
550
+ if generation.endswith((".", "}")):
551
+ generation += "\n\n"
552
+ if re.match(r"[A-Z0-9,;:]$", generation):
553
+ # add space in case there is a comma or word ending
554
+ generation += " "
555
+ elif generation.startswith(("#", "**", "\\begin")):
556
+ generation = "\n\n" + generation
557
+ elif generation.split("\n")[-1].startswith(("#", "Figure", "Table")):
558
+ generation = generation + "\n\n"
559
+ else:
560
+ try:
561
+ last_word = generation.split(" ")[-1]
562
+ if last_word in nltk.corpus.words.words():
563
+ generation += " "
564
+ except LookupError:
565
+ # add space just in case. Will split words but better than concatenating them
566
+ generation += " "
567
+
568
+ # table corrections
569
+ generation = self.correct_tables(generation)
570
+ # Remove optional, empty square brackets after begin{array}
571
+ generation = generation.replace("\\begin{array}[]{", "\\begin{array}{")
572
+ # Remove empty or malformed LaTeX tabular blocks with 2 or more columns specified, with spaces and ampersands.
573
+ generation = re.sub(
574
+ r"\\begin{tabular}{([clr ]){2,}}\s*[& ]*\s*(\\\\)? \\end{tabular}",
575
+ "",
576
+ generation,
577
+ )
578
+ # Remove lines containing "S.A.B." one or more times. Was included in Nougat's code.
579
+ generation = re.sub(r"(\*\*S\. A\. B\.\*\*\n+){2,}", "", generation)
580
+ # Remove markdown-style headers that are incomplete or empty on multiple lines.
581
+ generation = re.sub(r"^#+( [\[\d\w])?$", "", generation, flags=re.M)
582
+ # Remove lines with just one period.
583
+ generation = re.sub(r"^\.\s*$", "", generation, flags=re.M)
584
+ # Replace instances of three or more newlines with just two newlines.
585
+ generation = re.sub(r"\n{3,}", "\n\n", generation)
586
+ if fix_markdown:
587
+ return markdown_compatible(generation)
588
+ else:
589
+ return generation
590
+
591
+ def post_process_generation(
592
+ self,
593
+ generation: Union[str, List[str]],
594
+ fix_markdown: bool = True,
595
+ num_workers: int = None,
596
+ ) -> Union[str, List[str]]:
597
+ """
598
+ Postprocess a generated text or a list of generated texts.
599
+
600
+ This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting.
601
+
602
+ Postprocessing is quite slow, so it is recommended to use multiprocessing to speed it up.
603
+
604
+ Args:
605
+ generation (Union[str, List[str]]):
606
+ The generated text or a list of generated texts.
607
+ fix_markdown (`bool`, *optional*, defaults to `True`):
608
+ Whether to perform Markdown formatting fixes.
609
+ num_workers (`int`, *optional*):
610
+ Number of workers to use for multiprocessing (postprocessing several texts in
612
+ parallel).
612
+
613
+ Returns:
614
+ Union[str, List[str]]: The postprocessed text or list of postprocessed texts.
615
+ """
616
+ requires_backends(self, ["nltk", "levenshtein"])
617
+
618
+ if isinstance(generation, list):
619
+ if num_workers is not None and isinstance(num_workers, int):
620
+ with Pool(num_workers) as p:
621
+ return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation)
622
+ else:
623
+ return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation]
624
+ else:
625
+ return self.post_process_single(generation, fix_markdown=fix_markdown)
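End to end, the usual entry point is `post_process_generation`, called on the decoded model output. A sketch assuming the `nltk` and `python-Levenshtein` backends are installed; the checkpoint name is illustrative:

```python
from transformers import NougatTokenizerFast

tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")  # illustrative checkpoint

raw = "(1.1) \\[E=mc^{2}\\]\n\n\n\nSome closing sentence."
print(tokenizer.post_process_generation(raw, fix_markdown=True))

# A list of page generations can be postprocessed in parallel:
# tokenizer.post_process_generation([raw, raw], num_workers=2)
```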
llmeval-env/lib/python3.10/site-packages/transformers/models/swin/__init__.py ADDED
@@ -0,0 +1,86 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
20
+
21
+
22
+ try:
23
+ if not is_torch_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["modeling_swin"] = [
29
+ "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
30
+ "SwinForImageClassification",
31
+ "SwinForMaskedImageModeling",
32
+ "SwinModel",
33
+ "SwinPreTrainedModel",
34
+ "SwinBackbone",
35
+ ]
36
+
37
+ try:
38
+ if not is_tf_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_tf_swin"] = [
44
+ "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
45
+ "TFSwinForImageClassification",
46
+ "TFSwinForMaskedImageModeling",
47
+ "TFSwinModel",
48
+ "TFSwinPreTrainedModel",
49
+ ]
50
+
51
+ if TYPE_CHECKING:
52
+ from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
53
+
54
+ try:
55
+ if not is_torch_available():
56
+ raise OptionalDependencyNotAvailable()
57
+ except OptionalDependencyNotAvailable:
58
+ pass
59
+ else:
60
+ from .modeling_swin import (
61
+ SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
62
+ SwinBackbone,
63
+ SwinForImageClassification,
64
+ SwinForMaskedImageModeling,
65
+ SwinModel,
66
+ SwinPreTrainedModel,
67
+ )
68
+
69
+ try:
70
+ if not is_tf_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .modeling_tf_swin import (
76
+ TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
77
+ TFSwinForImageClassification,
78
+ TFSwinForMaskedImageModeling,
79
+ TFSwinModel,
80
+ TFSwinPreTrainedModel,
81
+ )
82
+
83
+ else:
84
+ import sys
85
+
86
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
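The `_LazyModule` indirection above keeps importing the package cheap: `modeling_swin` and `modeling_tf_swin` are only imported when one of their exported names is first accessed. A sketch of what that enables, assuming the torch backend is installed:

```python
import transformers.models.swin as swin  # cheap: nothing heavy is imported yet

config = swin.SwinConfig()      # configuration classes resolve without torch
model = swin.SwinModel(config)  # first attribute access imports modeling_swin (needs torch)
print(model.config.image_size)  # 224 by default
```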
llmeval-env/lib/python3.10/site-packages/transformers/models/swin/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.37 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/swin/__pycache__/configuration_swin.cpython-310.pyc ADDED
Binary file (7.34 kB).