# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob

import torch
from huggingface_hub import snapshot_download
from safetensors import safe_open

from transformers import (
    AddedToken,
    AriaForConditionalGeneration,
    AriaProcessor,
    AutoConfig,
    AutoTokenizer,
)


EPILOG_TXT = """Example:
    python transformers/src/transformers/models/aria/convert_aria_weights_to_hf.py --text_model_id rhymes-ai/Aria --vision_model_id rhymes-ai/Aria --output_hub_path m-ric/Aria_hf_2 --old_state_dict_id rhymes-ai/Aria

Example for creating the old state dict file with Python:

    import torch
    from aria.model.language_model.aria_llama import AriaTextForCausalLM

    # load model
    kwargs = {"device_map": "auto", "torch_dtype": torch.float16}
    model = AriaTextForCausalLM.from_pretrained("rhymes-ai/Aria", low_cpu_mem_usage=True, **kwargs)

    # load vision tower
    model.get_vision_tower().load_model()

    # Save state dict
    torch.save(model.state_dict(), "tmp/hf_models/aria/model_state_dict.bin")
"""

KEYS_TO_MODIFY_MAPPING = {
    "vision_tower.vision_model": "vision_tower",
    "ln_ffn": "layer_norm",
    "ffn": "feed_forward",
    "ln_kv": "layer_norm_kv",
}


def load_original_state_dict(model_id):
    directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"])

    original_state_dict = {}
    for path in glob.glob(f"{directory_path}/*"):
        if path.endswith(".safetensors"):
            with safe_open(path, framework="pt", device="cpu") as f:
                for key in f.keys():
                    original_state_dict[key] = f.get_tensor(key)

    return original_state_dict


def convert_state_dict_to_hf(state_dict):
    new_state_dict = {}
    for key, value in state_dict.items():
        if key.endswith(".inv_freq"):
            continue
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        new_state_dict[key] = value
    new_state_dict["vision_tower.post_layernorm.weight"] = torch.zeros((1152,))
    new_state_dict["vision_tower.post_layernorm.bias"] = torch.zeros((1152,))

    return new_state_dict


def convert_aria_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id):
    torch.set_default_dtype(torch.float16)

    tokenizer = AutoTokenizer.from_pretrained(
        text_model_id,
        extra_special_tokens={
            "image_token": "<|img|>",
            "pad_token": "<pad>",
        },
    )
    tokenizer.add_tokens(AddedToken("<|img|>", special=True, normalized=False), special_tokens=True)
    tokenizer.add_special_tokens({"pad_token": "<pad>"})
    tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}{% elif message['content'] is iterable %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<fim_prefix><|img|><fim_suffix>{% endif %}{% endfor %}{% endif %}<|im_end|>\n{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"

    processor = AriaProcessor.from_pretrained(
        text_model_id,
        tokenizer=tokenizer,
    )

    config = AutoConfig.from_pretrained(text_model_id)
    config.vision_config.hidden_size = 1152
    config.vision_config.attention_heads = 16
    config.pad_token_id = 2
    config.image_token_index = 9
    config.intermediate_size = config.moe_intermediate_size
    config.auto_map = {
        "AutoConfig": "modeling_aria.AriaConfig",
        "AutoModelForCausalLM": "modeling_aria.AriaForConditionalGeneration",
    }

    with torch.device("meta"):
        model = AriaForConditionalGeneration(config)

    state_dict = load_original_state_dict(old_state_dict_id)
    state_dict = convert_state_dict_to_hf(state_dict)
    model.load_state_dict(state_dict, strict=False, assign=True)

    # print("Saving models")
    # model.save_pretrained("local_aria", safe_serialization=False)
    # processor.save_pretrained("local_aria")
    print("Pushing to hub")
    model.push_to_hub(output_hub_path, create_pr=True)
    processor.push_to_hub(output_hub_path, create_pr=True)


def main():
    parser = argparse.ArgumentParser(
        epilog=EPILOG_TXT,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--text_model_id",
        default="rhymes-ai/Aria",
        help="Hub location of the text model",
    )
    parser.add_argument(
        "--vision_model_id",
        default="rhymes-ai/Aria",
        help="Hub location of the vision model",
    )
    parser.add_argument(
        "--output_hub_path",
        default="rhymes-ai/Aria",
        help="Location on the hub of the converted model",
    )
    parser.add_argument(
        "--old_state_dict_id",
        default="rhymes-ai/Aria",
        help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`",
    )
    args = parser.parse_args()
    convert_aria_llama_to_hf(args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id)


if __name__ == "__main__":
    main()
transformers/src/transformers/models/aria/convert_aria_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/aria/convert_aria_weights_to_hf.py", "repo_id": "transformers", "token_count": 2371 }
# coding=utf-8
# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auto Model class."""

from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("bloom", "FlaxBloomModel"),
        ("clip", "FlaxCLIPModel"),
        ("dinov2", "FlaxDinov2Model"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gemma", "FlaxGemmaModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("llama", "FlaxLlamaModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mistral", "FlaxMistralModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
        ("beit", "FlaxBeitForImageClassification"),
        ("dinov2", "FlaxDinov2ForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("bloom", "FlaxBloomForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gemma", "FlaxGemmaForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("llama", "FlaxLlamaForCausalLM"),
        ("mistral", "FlaxMistralForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM,
    head_doc="sequence-to-sequence language modeling",
    checkpoint_for_example="google-t5/t5-base",
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
transformers/src/transformers/models/auto/modeling_flax_auto.py/0
{ "file_path": "transformers/src/transformers/models/auto/modeling_flax_auto.py", "repo_id": "transformers", "token_count": 6214 }
# coding=utf-8 # Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BARK model.""" import math from typing import Dict, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import functional as F from ...generation import GenerationMixin from ...generation.logits_process import ( AlternatingCodebooksLogitsProcessor, BarkEosPrioritizerLogitsProcessor, SuppressTokensLogitsProcessor, ) from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput from ...modeling_utils import PreTrainedModel, get_parameter_device from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, ) from ..auto import AutoModel from .configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, BarkSubModelConfig, ) from .generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkSemanticGenerationConfig, ) if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "suno/bark-small" _CONFIG_FOR_DOC = "BarkConfig" class BarkSelfAttention(nn.Module): # adapted from GPTNeoSelfAttention and Bark code # BarkSelfAttention can have two attention type, i.e full attention or causal attention def __init__(self, config, is_causal=False): super().__init__() # regularization self.dropout = config.dropout self.attn_dropout = nn.Dropout(config.dropout) self.resid_dropout = nn.Dropout(config.dropout) self.embed_dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.embed_dim // self.num_heads if config.hidden_size % config.num_heads != 0: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) # key, query, value projections for all heads, but in a batch self.att_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.bias) # output projection self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.bias) self.is_causal = is_causal if is_causal: block_size = config.block_size bias = torch.tril(torch.ones((block_size, block_size), dtype=bool)).view(1, 1, block_size, block_size) self.register_buffer("bias", bias) # Copied from transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention._split_heads def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ # re-assemble all head outputs side by side # (batch, num_heads, seq_len, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) tensor = tensor.transpose(1, 2).contiguous() tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): # unlike GPTNeo's SelfAttention, divide by the square root of the dimension of the query and the key attn_weights = torch.matmul(query, key.transpose(-1, -2)) * (1.0 / math.sqrt(self.head_dim)) if self.is_causal: query_length, key_length = query.size(-2), key.size(-2) # fill the upper left part of the attention weights with inf attn_weights = attn_weights.masked_fill( self.bias[:, :, key_length - query_length : key_length, :key_length] == 0, torch.finfo(attn_weights.dtype).min, ) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.to(value.dtype) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask # (batch, num_heads, seq_len, seq_len) x (batch, num_heads, seq_len, attn_head_size) # -> (batch, num_heads, seq_len, attn_head_size) attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def forward( self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False, ): # calculate query, key, values for all heads in batch and move head forward to be the batch dim query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: past_key = past_key_values[0] past_value = past_key_values[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs class BarkSelfFlashAttention2(BarkSelfAttention): """ Bark flash attention module. 
This module inherits from `BarkSelfAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim - (batch, seq_length, head, head_features) return tensor def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ # re-assemble all head outputs side by side # (batch, seq_len, num_heads, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) return tensor def forward( self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False, ): batch_size, query_len, _ = hidden_states.size() # calculate query, key, values for all heads in batch and move head forward to be the batch dim query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: # (batch, head, seq_length, head_features) -> (batch, seq_length, head, head_features) past_key = past_key_values[0].transpose(1, 2) past_value = past_key_values[1].transpose(1, 2) # and merge on seq_length key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache is True: # (batch, head, seq_length, head_features) present = (key.transpose(1, 2), value.transpose(1, 2)) else: present = None attn_output = _flash_attention_forward( query, key, value, attention_mask, query_len, dropout=self.dropout if self.training else 0.0, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal, ) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: attn_weights = None outputs += (attn_weights,) return outputs BARK_ATTENTION_CLASSES = { "eager": BarkSelfAttention, "flash_attention_2": BarkSelfFlashAttention2, } class BarkLayerNorm(nn.Module): """LayerNorm but with an optional bias. 
PyTorch doesn't support simply bias=False.""" def __init__(self, hidden_size, bias=True): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) if bias else None def forward(self, input): return F.layer_norm(input, self.weight.shape, self.weight, self.bias, eps=1e-5) class BarkMLP(nn.Module): def __init__(self, config): super().__init__() self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias) self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias) self.dropout = nn.Dropout(config.dropout) self.gelu = nn.GELU() def forward(self, hidden_states): hidden_states = self.in_proj(hidden_states) hidden_states = self.gelu(hidden_states) hidden_states = self.out_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BarkBlock(nn.Module): def __init__(self, config, is_causal=False): super().__init__() if is_causal: # if causal, uses handmade LayerNorm, so that the layerNorm bias is optional # this handmade layerNorm is used to stick with Bark choice of leaving optional bias in # AutoRegressive models (corresponding to the "Text" and the "Coarse" modules) self.layernorm_1 = BarkLayerNorm(config.hidden_size, bias=config.bias) self.layernorm_2 = BarkLayerNorm(config.hidden_size, bias=config.bias) else: self.layernorm_1 = nn.LayerNorm(config.hidden_size) self.layernorm_2 = nn.LayerNorm(config.hidden_size) self.attn = BARK_ATTENTION_CLASSES[config._attn_implementation](config, is_causal=is_causal) self.mlp = BarkMLP(config) def forward( self, hidden_states, past_key_values=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, ): intermediary_hidden_states = self.layernorm_1(hidden_states) attn_outputs = self.attn( intermediary_hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] # output_attn: output, present_key_values, (attn_weights) outputs = attn_outputs[1:] intermediary_hidden_states = hidden_states + attn_output intermediary_hidden_states = intermediary_hidden_states + self.mlp( self.layernorm_2(intermediary_hidden_states) ) if use_cache: outputs = (intermediary_hidden_states,) + outputs else: outputs = (intermediary_hidden_states,) + outputs[1:] return outputs # hidden_states, ((present), attentions) class BarkPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = BarkConfig supports_gradient_checkpointing = False _supports_flash_attn_2 = True def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear,)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ # if has _hf_hook, has been offloaded so the device has to be found in the hook if not hasattr(self, "_hf_hook"): return get_parameter_device(self) for module in self.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return get_parameter_device(self) BARK_MODEL_START_DOCSTRING = """ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`{config}`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BARK_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BarkConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BARK_FINE_INPUTS_DOCSTRING = r""" Args: codebook_idx (`int`): Index of the codebook that will be predicted. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, number_of_codebooks)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Initially, indices of the first two codebooks are obtained from the `coarse` sub-model. The rest is predicted recursively by attending the previously predicted channels. The model predicts on windows of length 1024. 
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): NOT IMPLEMENTED YET. input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `input_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ BARK_CAUSAL_MODEL_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. Here, due to `Bark` particularities, if `past_key_values` is used, `input_embeds` will be ignored and you have to use `input_ids`. If `past_key_values` is not used and `use_cache` is set to `True`, `input_embeds` is used in priority instead of `input_ids`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # GPT2-like autoregressive model class BarkCausalModel(BarkPreTrainedModel, GenerationMixin): config_class = BarkSubModelConfig def __init__(self, config): super().__init__(config) self.config = config # initialize as an autoregressive GPT-like model self.input_embeds_layer = nn.Embedding(config.input_vocab_size, config.hidden_size) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList([BarkBlock(config, is_causal=True) for _ in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_final = BarkLayerNorm(config.hidden_size, bias=config.bias) self.lm_head = nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.input_embeds_layer def set_input_embeddings(self, new_embeddings): self.input_embeds_layer = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): # Overwritten -- bark has a model-specific hack input_embeds = kwargs.get("input_embeds", None) attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if past_key_values is not None: # Omit tokens covered by past_key_values seq_len = input_ids.shape[1] past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # input_embeds have already been used and is not required anymore input_embeds = None else: if input_embeds is not None and kwargs.get("use_cache"): seq_len = input_embeds.shape[1] else: seq_len = input_ids.shape[1] # ensure that attention_mask and position_ids shapes are aligned with the weird Bark hack of reducing # sequence 
length on the first forward pass if attention_mask is not None: attention_mask = attention_mask[:, :seq_len] if position_ids is not None: position_ids = position_ids[:, :seq_len] if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] else: position_ids = None if input_embeds is not None and kwargs.get("use_cache"): return { "input_ids": None, "input_embeds": input_embeds, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, } return { "input_ids": input_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, } @add_start_docstrings_to_model_forward(BARK_CAUSAL_MODEL_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, input_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None if labels is not None: raise NotImplementedError( "Training is not implemented yet for Bark - ensure you do not pass `labels` to the model." ) # Verify if input_embeds already exists # then compute embeddings. if input_ids is not None and input_embeds is not None: raise ValueError("You cannot specify both input_ids and input_embeds at the same time") elif input_embeds is not None and past_key_values is None: # we want to return the input_embeds in priority so that it is in line with a weird hack # of Bark which concatenate two bits of the input_embeds on the first forward pass of the semantic model pass elif input_ids is not None: input_embeds = self.input_embeds_layer(input_ids) # token embeddings of shape (b, t, n_embd) elif input_embeds is not None: pass else: raise ValueError("You have to specify either input_ids or input_embeds") input_shape = input_embeds.size()[:-1] batch_size = input_embeds.shape[0] seq_length = input_shape[-1] device = input_ids.device if input_ids is not None else input_embeds.device if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.layers)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) # Attention mask. 
if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: attention_mask = attention_mask.view(batch_size, -1) # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] # from_seq_length is 1 to easily broadcast attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_heads x N x N # head_mask has shape num_layers x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False present_key_values = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, past_layer_key_values) in enumerate(zip(self.layers, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, attention_mask, head_mask[i], use_cache, output_attentions, ) else: outputs = block( hidden_states, past_key_values=past_layer_key_values, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache: present_key_values = present_key_values + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_head(hidden_states) if not return_dict: return tuple( v for v in [None, logits, present_key_values, all_hidden_states, all_self_attentions] if v is not None ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. """ # Necessary for beam_search return tuple( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past_key_values ) @add_start_docstrings( """Bark semantic (or text) model. It shares the same architecture as the coarse model. 
It is a GPT-2 like autoregressive model with a language modeling head on top.""", BARK_MODEL_START_DOCSTRING.format(config="BarkSemanticConfig"), ) class BarkSemanticModel(BarkCausalModel): base_model_prefix = "semantic" config_class = BarkSemanticConfig def generate( self, input_ids: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, history_prompt: Optional[Dict[str, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> torch.LongTensor: """ Generates text semantic tokens from an input prompt and an additional optional `Bark` speaker prompt. Args: input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): Input ids, i.e tokenized input sentences. Will be truncated up to semantic_generation_config.max_input_semantic_length tokens. Note that the output audios will be as long as the longest generation among the batch. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. attention_mask (`Optional[torch.Tensor]`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Returns: torch.LongTensor: Output semantic tokens. """ if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") batch_size = input_ids.shape[0] max_input_semantic_length = semantic_generation_config.max_input_semantic_length input_ids = input_ids + semantic_generation_config.text_encoding_offset if attention_mask is not None: input_ids = input_ids.masked_fill((1 - attention_mask).bool(), semantic_generation_config.text_pad_token) if history_prompt is not None: semantic_history = history_prompt["semantic_prompt"][-max_input_semantic_length:] semantic_history = nn.functional.pad( semantic_history, (0, max_input_semantic_length - len(semantic_history)), value=semantic_generation_config.semantic_pad_token, mode="constant", ) else: semantic_history = torch.tensor( [semantic_generation_config.semantic_pad_token] * max_input_semantic_length, dtype=torch.int ).to(self.device) semantic_history = torch.repeat_interleave(semantic_history[None], batch_size, dim=0) infer_array = torch.tensor( [[semantic_generation_config.semantic_infer_token]] * batch_size, dtype=torch.int ).to(self.device) input_embeds = torch.cat( [ self.input_embeds_layer(input_ids[:, :max_input_semantic_length]) + self.input_embeds_layer(semantic_history[:, : max_input_semantic_length + 1]), self.input_embeds_layer(infer_array), ], dim=1, ) tokens_to_suppress = list( range(semantic_generation_config.semantic_vocab_size, semantic_generation_config.semantic_pad_token) ) tokens_to_suppress.extend( list(range(semantic_generation_config.semantic_pad_token + 1, self.config.output_vocab_size)) ) suppress_tokens_logits_processor = SuppressTokensLogitsProcessor(tokens_to_suppress, device=input_ids.device) min_eos_p = kwargs.get("min_eos_p", semantic_generation_config.min_eos_p) early_stopping_logits_processor = BarkEosPrioritizerLogitsProcessor( eos_token_id=semantic_generation_config.eos_token_id, min_eos_p=min_eos_p, device=input_ids.device ) # pass input_ids in order to stay consistent with the transformers generate method even though it is not used # (except to get the input seq_len - that's why we 
keep the first 257 tokens) semantic_output = super().generate( torch.ones((batch_size, max_input_semantic_length + 1), dtype=torch.int).to(self.device), input_embeds=input_embeds, logits_processor=[suppress_tokens_logits_processor, early_stopping_logits_processor], generation_config=semantic_generation_config, **kwargs, ) # size: 10048 # take the generated semantic tokens semantic_output = semantic_output[:, max_input_semantic_length + 1 :] return semantic_output @add_start_docstrings( """Bark coarse acoustics model. It shares the same architecture as the semantic (or text) model. It is a GPT-2 like autoregressive model with a language modeling head on top.""", BARK_MODEL_START_DOCSTRING.format(config="BarkCoarseConfig"), ) class BarkCoarseModel(BarkCausalModel): base_model_prefix = "coarse_acoustics" config_class = BarkCoarseConfig def preprocess_histories( self, max_coarse_history: int, semantic_to_coarse_ratio: int, batch_size: int, semantic_generation_config: int, codebook_size: int, history_prompt: Optional[Dict[str, torch.Tensor]] = None, ): """ Preprocess the optional `Bark` speaker prompts before `self.generate`. Args: max_coarse_history (`int`): Maximum size of coarse tokens used. semantic_to_coarse_ratio (`int`): Ratio of semantic to coarse frequency batch_size (`int`): Batch size, i.e the number of samples. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. codebook_size (`int`): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[Dict[str,torch.Tensor]]`): Optional `Bark` speaker prompt. Returns: Returns: `tuple(torch.FloatTensor)`: - **x_semantic_history** (`torch.FloatTensor` -- Processed semantic speaker prompt. - **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt. """ if history_prompt is not None: x_semantic_history = torch.repeat_interleave(history_prompt["semantic_prompt"][None], batch_size, dim=0) # clone to avoid modifying history_prompt.coarse_prompt x_coarse_history = history_prompt["coarse_prompt"].clone() # offset x_coarse_history if codebook_size is not None: for n in range(1, x_coarse_history.shape[0]): # offset x_coarse_history[n, :] += codebook_size * n # flatten x_coarse_history x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1) x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0) # e.g: after SEMANTIC_VOCAB_SIZE (10000), 1024 tokens dedicated to first codebook, 1024 next tokens # dedicated to second codebook. 
max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) # trim histories correctly n_semantic_hist_provided = min( [ max_semantic_history, x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio)), ] ) n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int() x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int() # bit of a hack for time alignment (sounds better) - from Bark original implementation x_coarse_history = x_coarse_history[:, :-2] else: # shape: (batch_size, 0) x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) return x_semantic_history, x_coarse_history def generate( self, semantic_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, coarse_generation_config: BarkCoarseGenerationConfig = None, codebook_size: int = 1024, history_prompt: Optional[Dict[str, torch.Tensor]] = None, return_output_lengths: Optional[bool] = None, **kwargs, ) -> Union[torch.LongTensor, Tuple[torch.LongTensor, torch.LongTensor]]: """ Generates coarse acoustics tokens from input text semantic tokens and an additional optional `Bark` speaker prompt. Args: semantic_output (`torch.Tensor` of shape (batch_size, seq_len), *optional*): Input text semantic ids, i.e the output of `BarkSemanticModel.generate`. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. coarse_generation_config (`BarkCoarseGenerationConfig`): Generation config indicating how to generate the coarse tokens. codebook_size (`int`, *optional*, defaults to 1024): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. return_output_lengths (`bool`, *optional*): Whether or not to return the output lengths. Useful when batching. Returns: By default: torch.LongTensor: Output coarse acoustics tokens. If `return_output_lengths=True`: `Tuple(torch.Tensor, torch.Tensor): The output coarse acoustics tokens, and the length of each sample of the batch. 
""" if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") if coarse_generation_config is None: raise ValueError("`coarse_generation_config` has to be provided") max_coarse_input_length = coarse_generation_config.max_coarse_input_length max_coarse_history = coarse_generation_config.max_coarse_history sliding_window_len = coarse_generation_config.sliding_window_len # replace semantic_pad_token (eos_tok and pad_tok here) with coarse_semantic_pad_token i.e the pad_token # used in the next model semantic_output.masked_fill_( semantic_output == semantic_generation_config.semantic_pad_token, coarse_generation_config.coarse_semantic_pad_token, ) semantic_to_coarse_ratio = ( coarse_generation_config.coarse_rate_hz / semantic_generation_config.semantic_rate_hz * coarse_generation_config.n_coarse_codebooks ) max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) output_lengths = (semantic_output != coarse_generation_config.coarse_semantic_pad_token).sum(1) output_lengths = torch.floor( output_lengths * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks ) output_lengths = torch.round(output_lengths * coarse_generation_config.n_coarse_codebooks).int() max_generated_len = torch.max(output_lengths).item() batch_size = semantic_output.shape[0] x_semantic_history, x_coarse = self.preprocess_histories( history_prompt=history_prompt, max_coarse_history=max_coarse_history, semantic_to_coarse_ratio=semantic_to_coarse_ratio, batch_size=batch_size, semantic_generation_config=semantic_generation_config, codebook_size=codebook_size, ) base_semantic_idx = x_semantic_history.shape[1] semantic_output = torch.hstack([x_semantic_history, semantic_output]) n_window_steps = int(np.ceil(max_generated_len / sliding_window_len)) total_generated_len = 0 len_coarse_history = x_coarse.shape[1] for _ in range(n_window_steps): semantic_idx = base_semantic_idx + int(round(total_generated_len / semantic_to_coarse_ratio)) # pad from right side input_coarse = semantic_output[:, np.max([0, semantic_idx - max_semantic_history]) :] input_coarse = input_coarse[:, :max_coarse_input_length] input_coarse = F.pad( input_coarse, (0, max_coarse_input_length - input_coarse.shape[-1]), "constant", coarse_generation_config.coarse_semantic_pad_token, ) input_coarse = torch.hstack( [ input_coarse, torch.tensor([[coarse_generation_config.coarse_infer_token]] * batch_size).to(self.device), x_coarse[:, -max_coarse_history:], ] ) alternatingLogitsProcessor = AlternatingCodebooksLogitsProcessor( input_coarse.shape[1], semantic_generation_config.semantic_vocab_size, codebook_size, ) output_coarse = super().generate( input_coarse, logits_processor=[alternatingLogitsProcessor], max_new_tokens=min(sliding_window_len, max_generated_len - total_generated_len), generation_config=coarse_generation_config, **kwargs, ) input_coarse_len = input_coarse.shape[1] x_coarse = torch.hstack([x_coarse, output_coarse[:, input_coarse_len:]]) total_generated_len = x_coarse.shape[1] - len_coarse_history del output_coarse coarse_output = x_coarse[:, len_coarse_history:] if return_output_lengths: return coarse_output, output_lengths return coarse_output @add_start_docstrings( """Bark fine acoustics model. 
It is a non-causal GPT-like model with `config.n_codes_total` embedding layers and language modeling heads, one for each codebook.""", BARK_MODEL_START_DOCSTRING.format(config="BarkFineConfig"), ) class BarkFineModel(BarkPreTrainedModel): base_model_prefix = "fine_acoustics" config_class = BarkFineConfig main_input_name = "codebook_idx" def __init__(self, config): # non-causal gpt-like model with one embedding layer and one lm_head for each codebook of Encodec super().__init__(config) self.config = config # initialize a modified non causal GPT-like model # note that for there is one embedding layer and one lm_head for each codebook of Encodec self.input_embeds_layers = nn.ModuleList( [nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)] ) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList([BarkBlock(config, is_causal=False) for _ in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_final = nn.LayerNorm(config.hidden_size) self.lm_heads = nn.ModuleList( [ nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) for _ in range(config.n_codes_given, config.n_codes_total) ] ) self.gradient_checkpointing = False self.n_codes_total = config.n_codes_total # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): # one embedding layers for each codebook return self.input_embeds_layers def set_input_embeddings(self, new_embeddings): # one embedding layers for each codebook self.input_embeds_layers = new_embeddings def get_output_embeddings(self): # one lm_head for each codebook return self.lm_heads def set_output_embeddings(self, new_output_embeddings): # one lm_head for each codebook self.lm_heads = new_output_embeddings def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True): old_embeddings_list = self.get_input_embeddings() new_embeddings_list = nn.ModuleList( [ self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of, mean_resizing) for old_embeddings in old_embeddings_list ] ) self.set_input_embeddings(new_embeddings_list) new_num_tokens = new_embeddings_list[0].weight.shape[0] # if word embeddings are not tied, make sure that lm head is resized as well if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: old_lm_head_list = self.get_output_embeddings() new_lm_head_list = nn.ModuleList( [self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list] ) self.set_output_embeddings(new_lm_head_list) return self.get_input_embeddings() def resize_token_embeddings( self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True, ) -> nn.Embedding: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. 
pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. """ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds # Update base model and current model config self.config.output_vocab_size = model_embeds[0].weight.shape[0] self.config.vocab_size = model_embeds[0].weight.shape[0] self.output_vocab_size = model_embeds[0].weight.shape[0] self.vocab_size = model_embeds[0].weight.shape[0] # Tie weights again if needed self.tie_weights() return model_embeds def _tie_weights(self): if getattr(self.config, "tie_word_embeddings", True): self._tied_weights_keys = [] output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() for i in range(self.config.n_codes_total - self.config.n_codes_given): # self.input_embeds_layers[i + 1].weight = self.lm_heads[i].weight self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) self._tied_weights_keys.append(f"lm_heads.{i}.weight") def tie_weights(self): """ Tie the weights between the input embeddings list and the output embeddings list. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. 
""" if getattr(self.config, "tie_word_embeddings", True): self._tied_weights_keys = [] output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() for i in range(self.config.n_codes_total - self.config.n_codes_given): # self.input_embeds_layers[i + 1].weight = self.lm_heads[i].weight self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) self._tied_weights_keys.append(f"lm_heads.{i}.weight") for module in self.modules(): if hasattr(module, "_tie_weights"): module._tie_weights() @add_start_docstrings_to_model_forward(BARK_FINE_INPUTS_DOCSTRING) def forward( self, codebook_idx: int, # an additionnal idx corresponding to the id of the codebook that will be predicted input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, input_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") if codebook_idx == 0: raise ValueError("Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model") if input_ids is not None and input_embeds is not None: raise ValueError("You cannot specify both input_ids and input_embeds at the same time") if input_ids is None and input_embeds is None: raise ValueError("You have to specify either input_ids or input_embeds") if input_ids is not None: # the input_embeddings are the sum of the j previous codebooks embeddings before # the current codebook_idx codebook # forward the GPT model itself input_embeds = [ input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1) for i, input_embeds_layer in enumerate(self.input_embeds_layers) ] # token embeddings of shape (b, t, n_embd) input_embeds = torch.cat(input_embeds, dim=-1) input_embeds = input_embeds[:, :, :, : codebook_idx + 1].sum(dim=-1) input_shape = input_embeds.size()[:-1] batch_size = input_embeds.shape[0] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else input_embeds.device if position_ids is None: position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) # Attention mask. 
if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] # from_seq_length is 1 to easily broadcast attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], output_attentions=output_attentions, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states) if not return_dict: return tuple(v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None) return MaskedLMOutput( loss=loss, logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def generate( self, coarse_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, coarse_generation_config: BarkCoarseGenerationConfig = None, fine_generation_config: BarkFineGenerationConfig = None, codebook_size: int = 1024, history_prompt: Optional[Dict[str, torch.Tensor]] = None, **kwargs, ) -> torch.LongTensor: """ Generates fine acoustics tokens from input coarse acoustics tokens and an additional optional `Bark` speaker prompt. Args: coarse_output (`torch.Tensor` of shape (batch_size, seq_len)): Input coarse acoustics ids, i.e the output of `BarkCoarseModel.generate`. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. coarse_generation_config (`BarkCoarseGenerationConfig`): Generation config indicating how to generate the coarse tokens. fine_generation_config (`BarkFineGenerationConfig`): Generation config indicating how to generate the fine tokens. codebook_size (`int`, *optional*, defaults to 1024): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. Returns: torch.LongTensor: Output fine acoustics tokens. 
""" if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") if coarse_generation_config is None: raise ValueError("`coarse_generation_config` has to be provided") if fine_generation_config is None: raise ValueError("`fine_generation_config` has to be provided") # since we don't really use GenerationConfig through the fine model (autoencoder) # and since only temperature is used from the classic GenerationConfig parameters # manually impose the kwargs priority over the generation config temperature = kwargs.get("temperature", fine_generation_config.temperature) max_fine_history_length = fine_generation_config.max_fine_history_length max_fine_input_length = fine_generation_config.max_fine_input_length # shape: (batch, n_coarse_codebooks * seq_len) # new_shape: (batch, seq_len, n_coarse_codebooks) coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks) # brings ids into the range [0, codebook_size -1] coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size) batch_size = coarse_output.shape[0] if history_prompt is not None: x_fine_history = torch.repeat_interleave(history_prompt["fine_prompt"].T[None], batch_size, dim=0) # transpose to get to shape (seq_len, n_fine_codebooks) else: x_fine_history = None n_coarse = coarse_generation_config.n_coarse_codebooks # pad the last 6th codebooks fine_input = F.pad( coarse_output, (0, fine_generation_config.n_fine_codebooks - n_coarse), "constant", codebook_size, ) # prepend history if available (max max_fine_history_length) if x_fine_history is not None: fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1) # len of the fine_history that has been added to fine_input n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1] else: n_history = 0 n_remove_from_end = 0 # need to pad if too short (since non-causal model) if fine_input.shape[1] < max_fine_input_length: n_remove_from_end = max_fine_input_length - fine_input.shape[1] fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode="constant", value=codebook_size) # we can be lazy about fractional loop and just keep overwriting codebooks. # seems that coarse_output.shape[1] - (max_fine_input_length - n_history) is equal to minus n_remove_from_end # So if we needed to pad because too short, n_loops is always 1 (because n_remove_from_end > 0) # If not, we loop over at least twice. 
n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length n_loops = int(np.ceil(n_loops)) n_loops = max(0, n_loops) + 1 for n_outer in range(n_loops): start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length]) start_fill_idx = min( [n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length] ) rel_start_fill_idx = start_fill_idx - start_idx input_buffer = fine_input[:, start_idx : start_idx + max_fine_input_length, :] for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): logits = self.forward(n_inner, input_buffer).logits if temperature is None or temperature == 1.0: relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size] codebook_preds = torch.argmax(relevant_logits, -1) else: relevant_logits = logits[:, :, :codebook_size] / temperature # apply softmax probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length] # reshape to 2D: (batch_size, seq_len, codebook_size) -> (batch_size*seq_len, codebook_size) probs = probs.reshape((-1, codebook_size)) # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1) codebook_preds = codebook_preds.to(torch.int32) input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds del logits, codebook_preds # transfer into fine_input for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): fine_input[ :, start_fill_idx : start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner ] = input_buffer[:, rel_start_fill_idx:, n_inner] del input_buffer fine_input = fine_input.transpose(1, 2)[:, :, n_history:] if n_remove_from_end > 0: fine_input = fine_input[:, :, :-n_remove_from_end] if fine_input.shape[-1] != coarse_output.shape[-2]: raise ValueError("input and output should have the same seq_len") return fine_input @add_start_docstrings( """ The full Bark model, a text-to-speech model composed of 4 sub-models: - [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that takes as input tokenized text, and predicts semantic text tokens that capture the meaning of the text. - [`BarkCoarseModel`] (also refered to as the 'coarse acoustics' model), also a causal autoregressive transformer, that takes into input the results of the last model. It aims at regressing the first two audio codebooks necessary to `encodec`. - [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively predicts the last codebooks based on the sum of the previous codebooks embeddings. - having predicted all the codebook channels from the [`EncodecModel`], Bark uses it to decode the output audio array. It should be noted that each of the first three modules can support conditional speaker embeddings to condition the output sound according to specific predefined voice. 
""", BARK_START_DOCSTRING, ) class BarkModel(BarkPreTrainedModel): config_class = BarkConfig def __init__(self, config): super().__init__(config) self.semantic = BarkSemanticModel(config.semantic_config) self.coarse_acoustics = BarkCoarseModel(config.coarse_acoustics_config) self.fine_acoustics = BarkFineModel(config.fine_acoustics_config) self.codec_model = AutoModel.from_config(config.codec_config) self.config = config @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ # for bark_model, device must be verified on its sub-models # if has _hf_hook, has been offloaded so the device has to be found in the hook if not hasattr(self.semantic, "_hf_hook"): return get_parameter_device(self) for module in self.semantic.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) def enable_cpu_offload(self, gpu_id: Optional[int] = 0): r""" Offloads all sub-models to CPU using accelerate, reducing memory usage with a low impact on performance. This method moves one whole sub-model at a time to the GPU when it is used, and the sub-model remains in GPU until the next sub-model runs. Args: gpu_id (`int`, *optional*, defaults to 0): GPU id on which the sub-models will be loaded and offloaded. """ if is_accelerate_available(): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate`.") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu") torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) # this layer is used outside the first foward pass of semantic so need to be loaded before semantic self.semantic.input_embeds_layer, _ = cpu_offload_with_hook(self.semantic.input_embeds_layer, device) hook = None for cpu_offloaded_model in [ self.semantic, self.coarse_acoustics, self.fine_acoustics, ]: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) self.fine_acoustics_hook = hook _, hook = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook) # We'll offload the last model manually. self.codec_model_hook = hook def codec_decode(self, fine_output, output_lengths=None): """Turn quantized audio codes into audio array using encodec.""" fine_output = fine_output.transpose(0, 1) emb = self.codec_model.quantizer.decode(fine_output) if output_lengths is not None: # encodec uses LSTMs which behaves differently with appended padding # decoding with encodec takes around 0.1% of the total generation time # to keep generation quality, we break batching out = [sample[:, :l].unsqueeze(0) for (sample, l) in zip(emb, output_lengths)] audio_arr = [self.codec_model.decoder(sample).squeeze() for sample in out] else: out = self.codec_model.decoder(emb) audio_arr = out.squeeze(1) # squeeze the codebook dimension return audio_arr @torch.no_grad() def generate( self, input_ids: Optional[torch.Tensor] = None, history_prompt: Optional[Dict[str, torch.Tensor]] = None, return_output_lengths: Optional[bool] = None, **kwargs, ) -> torch.LongTensor: """ Generates audio from an input prompt and an additional optional `Bark` speaker prompt. Args: input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): Input ids. Will be truncated up to 256 tokens. 
Note that the output audios will be as long as the longest generation among the batch. history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. Note that for now, this model takes only one speaker prompt per batch. kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. - With a *semantic_*, *coarse_*, *fine_* prefix, they will be input for the `generate` method of the semantic, coarse and fine respectively. It has the priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for all sub-models except one. return_output_lengths (`bool`, *optional*): Whether or not to return the waveform lengths. Useful when batching. Returns: By default: - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. When `return_output_lengths=True`: Returns a tuple made of: - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. - **output_lengths** (`torch.Tensor` of shape (batch_size)): The length of each waveform in the batch Example: ```python >>> from transformers import AutoProcessor, BarkModel >>> processor = AutoProcessor.from_pretrained("suno/bark-small") >>> model = BarkModel.from_pretrained("suno/bark-small") >>> # To add a voice preset, you can pass `voice_preset` to `BarkProcessor.__call__(...)` >>> voice_preset = "v2/en_speaker_6" >>> inputs = processor("Hello, my dog is cute, I need him in my life", voice_preset=voice_preset) >>> audio_array = model.generate(**inputs, semantic_max_new_tokens=100) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` """ # TODO (joao):workaround until nested generation config is compatible with PreTrained Model # todo: dict semantic_generation_config = BarkSemanticGenerationConfig(**self.generation_config.semantic_config) coarse_generation_config = BarkCoarseGenerationConfig(**self.generation_config.coarse_acoustics_config) fine_generation_config = BarkFineGenerationConfig(**self.generation_config.fine_acoustics_config) kwargs_semantic = { # if "attention_mask" is set, it should not be passed to CoarseModel and FineModel "attention_mask": kwargs.pop("attention_mask", None), "min_eos_p": kwargs.pop("min_eos_p", None), } kwargs_coarse = {} kwargs_fine = {} for key, value in kwargs.items(): if key.startswith("semantic_"): key = key[len("semantic_") :] kwargs_semantic[key] = value elif key.startswith("coarse_"): key = key[len("coarse_") :] kwargs_coarse[key] = value elif key.startswith("fine_"): key = key[len("fine_") :] kwargs_fine[key] = value else: # If the key is already in a specific config, then it's been set with a # submodules specific value and we don't override if key not in kwargs_semantic: kwargs_semantic[key] = value if key not in kwargs_coarse: kwargs_coarse[key] = value if key not in kwargs_fine: kwargs_fine[key] = value # 1. Generate from the semantic model if "generation_config" in kwargs_semantic: kwargs_semantic.pop("generation_config") semantic_output = self.semantic.generate( input_ids, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, **kwargs_semantic, ) # 2. 
Generate from the coarse model if "generation_config" in kwargs_coarse: kwargs_coarse.pop("generation_config") coarse_output = self.coarse_acoustics.generate( semantic_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, codebook_size=self.generation_config.codebook_size, return_output_lengths=return_output_lengths, **kwargs_coarse, ) output_lengths = None if return_output_lengths: coarse_output, output_lengths = coarse_output # (batch_size, seq_len*coarse_codebooks) -> (batch_size, seq_len) output_lengths = output_lengths // coarse_generation_config.n_coarse_codebooks # 3. "generate" from the fine model if "generation_config" in kwargs_fine: kwargs_fine.pop("generation_config") output = self.fine_acoustics.generate( coarse_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=self.generation_config.codebook_size, **kwargs_fine, ) if getattr(self, "fine_acoustics_hook", None) is not None: # Manually offload fine_acoustics to CPU # and load codec_model to GPU # since bark doesn't use codec_model forward pass self.fine_acoustics_hook.offload() self.codec_model = self.codec_model.to(self.device) # 4. Decode the output and generate audio array audio = self.codec_decode(output, output_lengths) if getattr(self, "codec_model_hook", None) is not None: # Offload codec_model to CPU self.codec_model_hook.offload() if return_output_lengths: output_lengths = [len(sample) for sample in audio] audio = nn.utils.rnn.pad_sequence(audio, batch_first=True, padding_value=0) return audio, output_lengths return audio @classmethod def _check_and_enable_flash_attn_2( cls, config, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, hard_check_only: bool = False, check_device_map: bool = False, ): """ `_check_and_enable_flash_attn_2` originally don't expand flash attention enabling to the model sub-configurations. We override the original method to make sure that Bark sub-models are using Flash Attention if necessary. If you don't know about Flash Attention, check out the official repository of flash attention: https://github.com/Dao-AILab/flash-attention For using Flash Attention 1.0 you can do it directly via the `BetterTransformer` API, have a look at this specific section of the documentation to learn more about it: https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#decoder-models The method checks if the current setup is compatible with Flash Attention as it requires the model to be in half precision and not ran on CPU. If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module """ config = super()._check_and_enable_flash_attn_2( config, torch_dtype, device_map, hard_check_only=hard_check_only, check_device_map=check_device_map ) config.semantic_config._attn_implementation = config._attn_implementation config.coarse_acoustics_config._attn_implementation = config._attn_implementation config.fine_acoustics_config._attn_implementation = config._attn_implementation return config __all__ = [ "BarkFineModel", "BarkSemanticModel", "BarkCoarseModel", "BarkModel", "BarkPreTrainedModel", "BarkCausalModel", ]
transformers/src/transformers/models/bark/modeling_bark.py/0
{ "file_path": "transformers/src/transformers/models/bark/modeling_bark.py", "repo_id": "transformers", "token_count": 35639 }
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BEiT model configuration""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices class BeitConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate an BEiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BEiT [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture. Args: vocab_size (`int`, *optional*, defaults to 8192): Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during pre-training. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. use_mask_token (`bool`, *optional*, defaults to `False`): Whether to use a mask token for masked image modeling. use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`): Whether to use BERT-style absolute position embeddings. use_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use T5-style relative position embeddings in the self-attention layers. 
use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use the same relative position embeddings across all self-attention layers of the Transformer. layer_scale_init_value (`float`, *optional*, defaults to 0.1): Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate per sample (when applied in the main path of residual layers). use_mean_pooling (`bool`, *optional*, defaults to `True`): Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the CLS token, before applying the classification head. pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. add_fpn (`bool`, *optional*, defaults to `False`): Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`]. reshape_hidden_states (`bool`, *optional*, defaults to `True`): Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size, seq_len, hidden_size)`. Only relevant for [`BeitBackbone`]. 
Example: ```python >>> from transformers import BeitConfig, BeitModel >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration >>> configuration = BeitConfig() >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration >>> model = BeitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "beit" def __init__( self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, out_features=None, out_indices=None, add_fpn=False, reshape_hidden_states=True, **kwargs, ): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.use_mask_token = use_mask_token self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_relative_position_bias = use_relative_position_bias self.use_shared_relative_position_bias = use_shared_relative_position_bias self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.use_mean_pooling = use_mean_pooling # decode head attributes (semantic segmentation) self.pool_scales = pool_scales # auxiliary head attributes (semantic segmentation) self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.semantic_loss_ignore_index = semantic_loss_ignore_index # handle backwards compatibility if "segmentation_indices" in kwargs: warnings.warn( "The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.", FutureWarning, ) out_indices = kwargs.pop("segmentation_indices") # backbone attributes self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) self.add_fpn = add_fpn self.reshape_hidden_states = reshape_hidden_states # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig class BeitOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 
1e-4 __all__ = ["BeitConfig", "BeitOnnxConfig"]
transformers/src/transformers/models/beit/configuration_beit.py/0
{ "file_path": "transformers/src/transformers/models/beit/configuration_beit.py", "repo_id": "transformers", "token_count": 4436 }
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Big Bird model.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: BigBirdTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} SPIECE_UNDERLINE = "▁" class BigBirdTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. 
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = BigBirdTokenizer model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BigBird sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Set to True if the token list is already formatted with special tokens for the model Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0] if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) __all__ = ["BigBirdTokenizerFast"]
transformers/src/transformers/models/big_bird/tokenization_big_bird_fast.py/0
{ "file_path": "transformers/src/transformers/models/big_bird/tokenization_big_bird_fast.py", "repo_id": "transformers", "token_count": 4154 }
# coding=utf-8 # Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Blenderbot model configuration""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging logger = logging.get_logger(__name__) class BlenderbotConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate an Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Blenderbot [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (`int`, *optional*, defaults to 128): The maximum sequence length that this model might ever be used with. 
Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. Example: ```python >>> from transformers import BlenderbotConfig, BlenderbotModel >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration >>> configuration = BlenderbotConfig() >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration >>> model = BlenderbotModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blenderbot" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=8008, max_position_embeddings=128, encoder_layers=2, encoder_ffn_dim=10240, encoder_attention_heads=32, decoder_layers=24, decoder_ffn_dim=10240, decoder_attention_heads=32, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=2560, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, encoder_no_repeat_ngram_size=3, forced_eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, forced_eos_token_id=forced_eos_token_id, **kwargs, ) class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: 
"encoder_sequence"}), ] ) if self.use_past: common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") elif self.task == "causal-lm": common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _, num_decoder_layers = self.num_layers for i in range(num_decoder_layers): common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} else: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, decoder_seq_length, is_pair, framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, encoder_seq_length = common_inputs["input_ids"].shape decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_past_length = decoder_seq_length decoder_shape = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["decoder_attention_mask"] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 ) common_inputs["past_key_values"] = [] _, num_decoder_layers = self.num_layers for _ in range(num_decoder_layers): common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), 
torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) return common_inputs def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape past_key_values_length = seqlen _, num_decoder_layers = self.num_layers num_encoder_attention_heads, _ = self.num_attention_heads past_shape = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) common_inputs["past_key_values"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers) ] return common_inputs # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.generate_dummy_inputs def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) elif self.task == "causal-lm": common_inputs = self._generate_dummy_inputs_for_causal_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) return common_inputs # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._flatten_past_key_values_ def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ["default", "seq2seq-lm"]: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str): if direction not in ["inputs", "outputs"]: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = "past_key_values" if direction == "inputs" else "present" _, num_decoder_layers = self.num_layers encoder_sequence = "past_encoder_sequence" decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence" for i in range(num_decoder_layers): inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence} __all__ = ["BlenderbotConfig", "BlenderbotOnnxConfig"]
transformers/src/transformers/models/blenderbot/configuration_blenderbot.py/0
{ "file_path": "transformers/src/transformers/models/blenderbot/configuration_blenderbot.py", "repo_id": "transformers", "token_count": 8300 }
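The configuration class above is easiest to understand by instantiating it. The sketch below is illustrative only (the parameter values are deliberately small, not the blenderbot-3B defaults) and assumes a local `transformers` installation whose ONNX export utilities still ship `BlenderbotOnnxConfig`; it builds a random-weight model and inspects the dynamic-axis mapping used for export.

```python
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.models.blenderbot.configuration_blenderbot import BlenderbotOnnxConfig

# A deliberately small configuration; the class defaults correspond to blenderbot-3B.
config = BlenderbotConfig(
    d_model=512,
    encoder_layers=2,
    decoder_layers=2,
    encoder_ffn_dim=1024,
    decoder_ffn_dim=1024,
    encoder_attention_heads=8,
    decoder_attention_heads=8,
)
model = BlenderbotForConditionalGeneration(config)

# attribute_map aliases hidden_size -> d_model and num_attention_heads -> encoder_attention_heads.
print(config.hidden_size, config.num_attention_heads)  # 512 8

# Dynamic-axis description used when exporting the seq2seq model to ONNX.
onnx_config = BlenderbotOnnxConfig(config, task="seq2seq-lm")
print(onnx_config.inputs)
```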
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def load_demo_image(image_size, device): img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") transform = transforms.Compose( [ transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ] ) image = transform(raw_image).unsqueeze(0).to(device) return image def rename_key(key): if "visual_encoder" in key: key = re.sub("visual_encoder*", "vision_model.encoder", key) if "blocks" in key: key = re.sub(r"blocks", "layers", key) if "attn" in key: key = re.sub(r"attn", "self_attn", key) if "norm1" in key: key = re.sub(r"norm1", "layer_norm1", key) if "norm2" in key: key = re.sub(r"norm2", "layer_norm2", key) if "encoder.norm" in key: key = re.sub(r"encoder.norm", "post_layernorm", key) if "encoder.patch_embed.proj" in key: key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key) if "encoder.pos_embed" in key: key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key) if "encoder.cls_token" in key: key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key) if "self_attn" in key: key = re.sub(r"self_attn.proj", "self_attn.projection", key) return key @torch.no_grad() def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None): """ Copy/paste/tweak model's weights to transformers design. 
""" if config_path is not None: config = BlipConfig.from_pretrained(config_path) else: config = BlipConfig(projection_dim=512, text_config={}, vision_config={}) hf_model = BlipForConditionalGeneration(config).eval() model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base") pt_model = pt_model.eval() modified_state_dict = pt_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_model.load_state_dict(modified_state_dict) image_size = 384 image = load_demo_image(image_size=image_size, device="cpu") tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") input_ids = tokenizer(["a picture of"]).input_ids out = hf_model.generate(image, input_ids) assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] out = hf_model.generate(image) assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(pytorch_dump_folder_path) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' model_url = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base") vqa_model.eval() modified_state_dict = vqa_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_vqa_model = BlipForQuestionAnswering(config) hf_vqa_model.load_state_dict(modified_state_dict) question = ["How many dogs are in this image?"] question_input_ids = tokenizer(question, return_tensors="pt").input_ids answer = hf_vqa_model.generate(question_input_ids, image) print(tokenizer.decode(answer[0])) assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa") model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base") itm_model.eval() modified_state_dict = itm_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_itm_model = BlipForImageTextRetrieval(config) question = ["A picture of a woman with a dog sitting in a beach"] question_input_ids = tokenizer( question, return_tensors="pt", padding="max_length", truncation=True, max_length=35, ).input_ids hf_itm_model.load_state_dict(modified_state_dict) hf_itm_model.eval() out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True) out = hf_itm_model(question_input_ids, image, use_itm_head=False) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--config_path", default=None, type=str, 
help="Path to hf config.json of model to convert") args = parser.parse_args() convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
transformers/src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py", "repo_id": "transformers", "token_count": 2801 }
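A sketch of how the BLIP conversion entry point above might be invoked. It assumes the Salesforce BLIP repository has been cloned so that the `models.blip*` imports resolve, and that the script is importable from the working directory; the output directory name is a placeholder.

```python
# Placeholder output directory; the script downloads the original BLIP checkpoints itself
# and also converts the VQA and ITM variants into "<path>_vqa" and "<path>_itm".
from convert_blip_original_pytorch_to_hf import convert_blip_checkpoint

convert_blip_checkpoint(pytorch_dump_folder_path="./blip-converted", config_path=None)

# Equivalent command line:
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-converted
```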
# coding=utf-8 # Copyright 2024 Meta Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Chameleon. """ from typing import List, Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack, _validate_images_text_input_order from ...tokenization_utils_base import PreTokenizedInput, TextInput class ChameleonTextKwargs(TextKwargs, total=False): return_for_text_completion: bool class ChameleonProcessorKwargs(ProcessingKwargs, total=False): text_kwargs: ChameleonTextKwargs _defaults = { "text_kwargs": { "padding": False, "return_for_text_completion": False, }, "common_kwargs": { "return_tensors": "pt", }, } class ChameleonProcessor(ProcessorMixin): r""" Constructs a Chameleon processor which wraps a Chameleon image processor and a Chameleon tokenizer into a single processor. [`ChameleonProcessor`] offers all the functionalities of [`ChameleonImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~ChameleonProcessor.__call__`] and [`~ChameleonProcessor.decode`] for more information. Args: image_processor ([`ChameleonImageProcessor`]): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`]): The tokenizer is a required input. image_seq_length (`int`, *optional*, defaults to 1024): Sequence length of one image embedding. image_token (`str`, *optional*, defaults to `"<image>"`): The special token used to indicate image in the text. """ attributes = ["image_processor", "tokenizer"] tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast") valid_kwargs = ["image_seq_length", "image_token"] image_processor_class = "ChameleonImageProcessor" def __init__(self, image_processor, tokenizer, image_seq_length: int = 1024, image_token: str = "<image>"): self.image_seq_length = image_seq_length self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token self.image_start_token = ( tokenizer.boi_token if hasattr(tokenizer, "boi_token") else "<racm3:break>" ) # fixed tokens for start and end, so can hardcode self.image_end_token = tokenizer.eoi_token if hasattr(tokenizer, "eoi_token") else "<eoss>" super().__init__(image_processor, tokenizer) def __call__( self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, audio=None, videos=None, **kwargs: Unpack[ChameleonProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring of the above two methods for more information. 
Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ # check if images and text inputs are reversed for BC images, text = _validate_images_text_input_order(images, text) if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise TypeError("Invalid input text. Please provide a string, or a list of strings") if text is None and images is None: raise ValueError("You must provide either text or images") output_kwargs = self._merge_kwargs( ChameleonProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) return_for_text_completion = output_kwargs["text_kwargs"].pop("return_for_text_completion", False) # Replace the image token with the expanded image token sequence prompt_strings = [] one_img_tokens = self.image_start_token + (self.image_token * self.image_seq_length) + self.image_end_token for sample in text: sample = sample.replace(self.image_token, one_img_tokens) if not return_for_text_completion: sample += self.tokenizer.sep_token # special Chameleon treatment to add sep for chat mode prompt_strings.append(sample) data = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"]) if images is not None: data["pixel_values"] = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"] return BatchFeature(data=data, tensor_type=output_kwargs["common_kwargs"]["return_tensors"]) # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama def decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) @property # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) __all__ = ["ChameleonProcessor"]
transformers/src/transformers/models/chameleon/processing_chameleon.py/0
{ "file_path": "transformers/src/transformers/models/chameleon/processing_chameleon.py", "repo_id": "transformers", "token_count": 3260 }
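A minimal usage sketch for the processor above. The checkpoint id is illustrative (any repository that ships a `ChameleonProcessor` works the same way), and network access is assumed for downloading both the processor files and the demo image.

```python
import requests
from PIL import Image
from transformers import ChameleonProcessor

processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")  # illustrative checkpoint

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# "<image>" is expanded to image_start_token + image_seq_length image tokens + image_end_token.
inputs = processor(images=image, text="What do you see?<image>", return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)
```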
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from clip import load from transformers import CLIPConfig, CLIPModel def copy_attn_layer(hf_attn_layer, pt_attn_layer): q_proj, k_proj, v_proj = pt_attn_layer.in_proj_weight.chunk(3, dim=0) q_proj_bias, k_proj_bias, v_proj_bias = pt_attn_layer.in_proj_bias.chunk(3, dim=0) out_proj_weights = pt_attn_layer.out_proj.weight out_proj_bias = pt_attn_layer.out_proj.bias hf_attn_layer.q_proj.weight.data = q_proj hf_attn_layer.q_proj.bias.data = q_proj_bias hf_attn_layer.k_proj.weight.data = k_proj hf_attn_layer.k_proj.bias.data = k_proj_bias hf_attn_layer.v_proj.weight.data = v_proj hf_attn_layer.v_proj.bias.data = v_proj_bias hf_attn_layer.out_proj.weight = out_proj_weights hf_attn_layer.out_proj.bias = out_proj_bias def copy_mlp(hf_mlp, pt_mlp): copy_linear(hf_mlp.fc1, pt_mlp.c_fc) copy_linear(hf_mlp.fc2, pt_mlp.c_proj) def copy_linear(hf_linear, pt_linear): hf_linear.weight = pt_linear.weight hf_linear.bias = pt_linear.bias def copy_layer(hf_layer, pt_layer): # copy layer norms copy_linear(hf_layer.layer_norm1, pt_layer.ln_1) copy_linear(hf_layer.layer_norm2, pt_layer.ln_2) # copy MLP copy_mlp(hf_layer.mlp, pt_layer.mlp) # copy attn copy_attn_layer(hf_layer.self_attn, pt_layer.attn) def copy_layers(hf_layers, pt_layers): for hf_layer, pt_layer in zip(hf_layers, pt_layers): copy_layer(hf_layer, pt_layer) def copy_encoder(hf_encoder, pt_model): # copy embeds hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding # copy layer norm copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final) # copy hidden layers copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks) def copy_text_model_and_projection(hf_model, pt_model): # copy projection hf_model.text_projection.weight.data = pt_model.text_projection.data.T.contiguous() # copy text encoder copy_encoder(hf_model.text_model, pt_model) def copy_vison_model_and_projection(hf_model, pt_model): # copy projection hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T.contiguous() # copy layer norms copy_linear(hf_model.vision_model.pre_layrnorm, pt_model.visual.ln_pre) copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post) # copy embeds hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_model.visual.conv1.weight.data hf_model.vision_model.embeddings.class_embedding = pt_model.visual.class_embedding hf_model.vision_model.embeddings.position_embedding.weight.data = pt_model.visual.positional_embedding.data # copy encoder copy_layers(hf_model.vision_model.encoder.layers, pt_model.visual.transformer.resblocks) @torch.no_grad() def convert_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): """ Copy/paste/tweak model's weights to transformers design. 
""" if config_path is not None: config = CLIPConfig.from_pretrained(config_path) else: config = CLIPConfig(projection_dim=512, text_config={}, vision_config={}) hf_model = CLIPModel(config).eval() pt_model, _ = load(checkpoint_path, device="cpu", jit=False) pt_model = pt_model.eval() copy_text_model_and_projection(hf_model, pt_model) copy_vison_model_and_projection(hf_model, pt_model) hf_model.logit_scale = pt_model.logit_scale # Use `eos_token` so the example is more meaningful input_ids = torch.tensor( [ [config.text_config.bos_token_id] + list(range(3, 77)) + [config.text_config.eos_token_id] + [config.text_config.pad_token_id] ] ) pixel_values = torch.randn(1, 3, 224, 224) hf_outputs = hf_model(input_ids=input_ids, pixel_values=pixel_values, return_dict=True) hf_logits_per_image = hf_outputs.logits_per_image hf_logits_per_text = hf_outputs.logits_per_text pt_logits_per_image, pt_logits_per_text = pt_model(pixel_values, input_ids) assert torch.allclose(hf_logits_per_image, pt_logits_per_image, atol=1e-3) assert torch.allclose(hf_logits_per_text, pt_logits_per_text, atol=1e-3) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to OpenAI checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") args = parser.parse_args() convert_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
transformers/src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py", "repo_id": "transformers", "token_count": 2313 }
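A sketch of calling the CLIP conversion function above directly. It assumes the original OpenAI `clip` package is installed (its `load` function accepts either a model name such as `"ViT-B/32"` or a local checkpoint path); the dump folder is a placeholder.

```python
from convert_clip_original_pytorch_to_hf import convert_clip_checkpoint

convert_clip_checkpoint(
    checkpoint_path="ViT-B/32",            # any name/path accepted by clip.load()
    pytorch_dump_folder_path="./clip-hf",  # where the converted CLIPModel is saved
    config_path=None,                      # fall back to the default CLIPConfig built in the script
)

# Equivalent command line:
#   python convert_clip_original_pytorch_to_hf.py --checkpoint_path ViT-B/32 --pytorch_dump_folder_path ./clip-hf
```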
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """CLVP model configuration""" import os from typing import TYPE_CHECKING, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ClvpEncoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ClvpEncoder`]. It is used to instantiate a CLVP text or CLVP speech encoder according to the specified arguments. Instantiating a configuration with the defaults will yield a similar configuration to that of the encoder of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256): Vocabulary size of the CLVP Encoder model. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 1536): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. projection_dim (`int`, *optional*, defaults to 768): Dimensionality of the projection vector. num_hidden_layers (`int`, *optional*, defaults to 20): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the feed-forward layers in [`ClvpEncoderMLP`]. use_rotary_embedding (`bool`, *optional*, defaults to `True`): Whether to use rotary_embedding or not. use_attention_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in Query, Key and Value layers during self attention. summary_type (`str`, *optional*, defaults to `"mean"`): What strategy to use to get pooler_output from the last_hidden_state. `"last"`, `"first"`, `"mean"` and `"cls_index"` are supported. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). bos_token_id (`int`, *optional*, defaults to 255): Beginning of sequence token id. eos_token_id (`int`, *optional*, defaults to 0): End of sequence token id. 
Example: ```python >>> from transformers import ClvpEncoderConfig, ClvpEncoder >>> # Initializing a ClvpEncoderConfig with susnato/clvp_dev style configuration >>> encoder_configuration = ClvpEncoderConfig() >>> # Initializing a ClvpEncoder (with random weights) from the susnato/clvp_dev style configuration >>> model = ClvpEncoder(encoder_configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "clvp_encoder" base_config_key = ["text_config", "speech_config"] def __init__( self, vocab_size=256, hidden_size=768, intermediate_size=1536, projection_dim=768, num_hidden_layers=20, num_attention_heads=12, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.1, dropout=0.1, use_rotary_embedding=True, use_attention_bias=False, summary_type="mean", initializer_factor=1.0, bos_token_id=255, eos_token_id=0, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.dropout = dropout self.use_rotary_embedding = use_rotary_embedding self.use_attention_bias = use_attention_bias self.summary_type = summary_type self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str = "text_config", **kwargs ) -> "PretrainedConfig": cls._set_token_in_kwargs(kwargs) config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) # make sure to have the config_type be either "text_config" or "speech_config" # this is to make sure that we can load only text or speech configs from the nested ClvpConfig. if config_type not in cls.base_config_key: raise ValueError( f"We can only load either 'text_config' or 'speech_config' but you are trying to load" f"{config_type}" ) # get the text config dict if we are loading from ClvpConfig if config_dict.get("model_type") == "clvp": config_dict = config_dict[config_type] if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(config_dict, **kwargs) class ClvpDecoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ClvpDecoder`]. It is used to instantiate a CLVP Decoder Model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Decoder part of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. The architecture is similar to GPT2. Args: vocab_size (`int`, *optional*, defaults to 8194): Vocabulary size of the model. 
max_position_embeddings (`int`, *optional*, defaults to 608): The maximum sequence length of mel tokens that this model might ever be used with. Similar to `n_positions` in `GPT2Config`. max_text_tokens (`int`, *optional*, defaults to 404): The maximum sequence length of text tokens that this model might ever be used with. Similar to `n_positions` in `GPT2Config`. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the embeddings and hidden states. num_hidden_layers (`int`, *optional*, defaults to 30): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. n_inner (`int`, *optional*): Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `hidden_size`. num_mel_attn_blocks (`int`, *optional*, defaults to 6): Denotes the number of self attention layers in [`ClvpConditioningEncoder`]. activation_function (`str`, *optional*, defaults to `"gelu_new"`): Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. summary_type (`string`, *optional*, defaults to `"cls_index"`): Argument used when doing sequence summary. Has to be one of the following options: - `"last"`: Take the last token hidden state (like XLNet). - `"first"`: Take the first token hidden state (like BERT). - `"mean"`: Take the mean of all tokens hidden states. - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - `"attn"`: Not implemented now, use multi-head attention. summary_use_proj (`bool`, *optional*, defaults to `True`): Whether or not to add a projection after the vector extraction. summary_activation (`str`, *optional*): Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation. summary_proj_to_labels (`bool`, *optional*, defaults to `True`): Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes. summary_first_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio to be used after the projection and activation. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). bos_token_id (`int`, *optional*, defaults to 8192): Beginning of sequence token id, used at the start of the generation. eos_token_id (`int`, *optional*, defaults to 8193): End of sequence token id, used in the method [`ClvpModelForConditionalGeneration.fix_speech_decoder_output()`] to correct decoder outputs. feature_size (`int`, *optional*, defaults to 80): The feature dimension of the extracted mel features. This value is used in [`ClvpConditioningEncoder`]. use_attention_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in Query, Key and Value layers during self attention. 
initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). decoder_fixing_codes (`list`, *optional*, defaults to `[83, 45, 45, 248]`): These values are used in the method `fix_speech_decoder_output` to fix decoder generated outputs. Example: ```python >>> from transformers import ClvpDecoderConfig, ClvpDecoder >>> # Initializing a ClvpDecoderConfig with susnato/clvp_dev style configuration >>> decoder_configuration = ClvpDecoderConfig() >>> # Initializing a ClvpDecoder (with random weights) from the susnato/clvp_dev style configuration >>> model = ClvpDecoder(decoder_configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "clvp_decoder" base_config_key = "decoder_config" def __init__( self, vocab_size=8194, max_position_embeddings=608, max_text_tokens=404, hidden_size=1024, num_hidden_layers=30, num_attention_heads=16, n_inner=None, num_mel_attn_blocks=6, activation_function="gelu_new", resid_pdrop=0.1, embd_pdrop=0.1, attention_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, use_cache=True, bos_token_id=8192, eos_token_id=8193, feature_size=80, use_attention_bias=True, initializer_factor=1.0, decoder_fixing_codes=[83, 45, 45, 248], **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.max_text_tokens = max_text_tokens self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.n_inner = n_inner self.num_mel_attn_blocks = num_mel_attn_blocks self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attention_dropout = attention_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels self.use_cache = use_cache self.feature_size = feature_size self.use_attention_bias = use_attention_bias self.initializer_factor = initializer_factor self.decoder_fixing_codes = decoder_fixing_codes self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) class ClvpConfig(PretrainedConfig): r""" [`ClvpConfig`] is the configuration class to store the configuration of a [`ClvpModelForConditionalGeneration`]. It is used to instantiate a CLVP model according to the specified arguments, defining the text model, speech model and decoder model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize the CLVP text encoder. speech_config (`dict`, *optional*): Dictionary of configuration options used to initialize CLVP speech encoder. 
decoder_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ClvpDecoderConfig`]. projection_dim (`int`, *optional*, defaults to 768): Dimensionality of text and speech projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original CLVP implementation. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization testing). kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ClvpConfig, ClvpModelForConditionalGeneration >>> # Initializing a ClvpConfig with susnato/clvp_dev style configuration >>> configuration = ClvpConfig() >>> # Initializing a ClvpModelForConditionalGeneration (with random weights) from the susnato/clvp_dev style configuration >>> model = ClvpModelForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a CLVPConfig from a CLVPTextConfig, CLVPSpeechConfig and a CLVPAutoRegressiveConfig >>> from transformers import ClvpEncoderConfig, ClvpDecoderConfig >>> # Initializing a CLVP text, CLVP speech and CLVP decoder configuration >>> config_text = ClvpEncoderConfig() >>> config_speech = ClvpEncoderConfig() >>> decoder_config = ClvpDecoderConfig() >>> config = ClvpConfig.from_sub_model_configs(config_text, config_speech, decoder_config) ```""" model_type = "clvp" sub_configs = { "text_config": ClvpEncoderConfig, "speech_config": ClvpEncoderConfig, "decoder_config": ClvpDecoderConfig, } def __init__( self, text_config=None, speech_config=None, decoder_config=None, projection_dim=768, logit_scale_init_value=2.6592, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.") if speech_config is None: speech_config = {} logger.info("`speech_config` is `None`. initializing the `ClvpEncoderConfig` with default values.") if decoder_config is None: decoder_config = {} logger.info("`decoder_config` is `None`. initializing the `ClvpDecoderConfig` with default values.") self.text_config = ClvpEncoderConfig(**text_config) self.speech_config = ClvpEncoderConfig(**speech_config) self.decoder_config = ClvpDecoderConfig(**decoder_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = initializer_factor @classmethod def from_sub_model_configs( cls, text_config: ClvpEncoderConfig, speech_config: ClvpEncoderConfig, decoder_config: ClvpDecoderConfig, **kwargs, ): r""" Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model configuration and CLVP decoder model configuration. Args: text_config (`ClvpEncoderConfig`): Text model configuration of type [`ClvpEncoderConfig`]. speech_config (`ClvpEncoderConfig`): Speech model configuration of type [`ClvpEncoderConfig`]. decoder_config (`ClvpDecoderConfig`): Decoder model configuration of type [`ClvpDecoderConfig`]. Returns: [`ClvpConfig`]: An instance of a configuration object """ return cls( text_config=text_config.to_dict(), speech_config=speech_config.to_dict(), decoder_config=decoder_config.to_dict(), **kwargs, ) __all__ = ["ClvpConfig", "ClvpDecoderConfig", "ClvpEncoderConfig"]
transformers/src/transformers/models/clvp/configuration_clvp.py/0
{ "file_path": "transformers/src/transformers/models/clvp/configuration_clvp.py", "repo_id": "transformers", "token_count": 7890 }
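The composition pattern used by `ClvpConfig` is easiest to see with explicit sub-configs. The sketch below mirrors `from_sub_model_configs` and also exercises the `config_type` switch on `ClvpEncoderConfig.from_pretrained`; the Hub id is the one referenced in the docstrings above, and loading it requires network access.

```python
from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig

# Compose a full CLVP configuration from small, explicit sub-configs.
text_config = ClvpEncoderConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
speech_config = ClvpEncoderConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
decoder_config = ClvpDecoderConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
config = ClvpConfig.from_sub_model_configs(text_config, speech_config, decoder_config)

# Pull only one nested encoder config out of a full CLVP checkpoint.
speech_only = ClvpEncoderConfig.from_pretrained("susnato/clvp_dev", config_type="speech_config")
print(type(speech_only).__name__, speech_only.hidden_size)
```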
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Conditional DETR checkpoints.""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", 
f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query 
scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def rename_key(state_dict, old, new): val = state_dict.pop(old) state_dict[new] = val def rename_backbone_keys(state_dict): new_state_dict = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model") new_state_dict[new_key] = value else: new_state_dict[key] = value return new_state_dict def read_in_q_k_v(state_dict, is_panoptic=False): prefix = "" if is_panoptic: prefix = "conditional_detr." # first: transformer encoder for i in range(6): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure. 
""" # load default config config = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: config.backbone = "resnet101" if "dc5" in model_name: config.dilation = True is_panoptic = "panoptic" in model_name if is_panoptic: config.num_labels = 250 else: config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # load image processor format = "coco_panoptic" if is_panoptic else "coco_detection" image_processor = ConditionalDetrImageProcessor(format=format) # prepare image img = prepare_img() encoding = image_processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] logger.info(f"Converting model {model_name}...") # load original model from torch hub conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval() state_dict = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: src = "conditional_detr." + src rename_key(state_dict, src, dest) state_dict = rename_backbone_keys(state_dict) # query, key and value matrices need special treatment read_in_q_k_v(state_dict, is_panoptic=is_panoptic) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them prefix = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr") and not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor") ): val = state_dict.pop(key) state_dict["conditional_detr.model" + key[4:]] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: val = state_dict.pop(key) state_dict["conditional_detr." 
+ key] = val elif key.startswith("bbox_attention") or key.startswith("mask_head"): continue else: val = state_dict.pop(key) state_dict[prefix + key] = val else: if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): val = state_dict.pop(key) state_dict[prefix + key] = val # finally, create HuggingFace model and load state dict model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config) model.load_state_dict(state_dict) model.eval() model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model") # verify our conversion original_outputs = conditional_detr(pixel_values) outputs = model(pixel_values) assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4) assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4) # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) args = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
transformers/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 6952 }
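A sketch of running the Conditional DETR conversion above. The original weights are fetched through `torch.hub` from `DeppMeng/ConditionalDETR`, and the function also calls `push_to_hub`, so Hub credentials are expected; the dump folder is a placeholder.

```python
from convert_conditional_detr_original_pytorch_checkpoint_to_pytorch import (
    convert_conditional_detr_checkpoint,
)

convert_conditional_detr_checkpoint(
    model_name="conditional_detr_resnet50",         # default exposed by the argparse flags
    pytorch_dump_folder_path="./conditional-detr",  # placeholder output directory
)

# Equivalent command line:
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 --pytorch_dump_folder_path ./conditional-detr
```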
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for ConvNeXT.""" from typing import Dict, List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS, BaseImageProcessorFast, DefaultFastImageProcessorInitKwargs, DefaultFastImageProcessorPreprocessKwargs, group_images_by_shape, reorder_images, ) from ...image_transforms import get_resize_output_image_size from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, ) from ...processing_utils import Unpack from ...utils import ( TensorType, add_start_docstrings, is_torch_available, is_torchvision_available, is_torchvision_v2_available, ) if is_torch_available(): import torch if is_torchvision_available(): if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F class ConvNextFastImageProcessorInitKwargs(DefaultFastImageProcessorInitKwargs): crop_pct: Optional[float] class ConvNextFastImageProcessorPreprocessKwargs(DefaultFastImageProcessorPreprocessKwargs): crop_pct: Optional[float] @add_start_docstrings( r"Constructs a fast ConvNeXT image processor.", BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, """ crop_pct (`float`, *optional*): Percentage of the image to crop. Only has an effect if size < 384. Can be overridden by `crop_pct` in the`preprocess` method. """, ) class ConvNextImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"shortest_edge": 384} default_to_square = False do_resize = True do_rescale = True do_normalize = True crop_pct = 224 / 256 valid_init_kwargs = ConvNextFastImageProcessorInitKwargs valid_preprocess_kwargs = ConvNextFastImageProcessorPreprocessKwargs def __init__(self, **kwargs: Unpack[ConvNextFastImageProcessorInitKwargs]): super().__init__(**kwargs) @add_start_docstrings( BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS, """ crop_pct (`float`, *optional*): Percentage of the image to crop. Only has an effect if size < 384. Can be overridden by `crop_pct` in the`preprocess` method. """, ) def preprocess( self, images: ImageInput, **kwargs: Unpack[ConvNextFastImageProcessorPreprocessKwargs] ) -> BatchFeature: return super().preprocess(images, **kwargs) def resize( self, image: "torch.Tensor", size: Dict[str, int], crop_pct: float, interpolation: PILImageResampling = PILImageResampling.BICUBIC, **kwargs, ) -> "torch.Tensor": """ Resize an image. Args: image (`torch.Tensor`): Image to resize. size (`Dict[str, int]`): Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`. 
Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. crop_pct (`float`): Percentage of the image to crop. Only has an effect if size < 384. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. Returns: `torch.Tensor`: Resized image. """ if not size.shortest_edge: raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}") shortest_edge = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct resize_shortest_edge = int(shortest_edge / crop_pct) resize_size = get_resize_output_image_size( image, size=resize_shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST ) image = F.resize( image, resize_size, interpolation=interpolation, **kwargs, ) # then crop to (shortest_edge, shortest_edge) return F.center_crop( image, (shortest_edge, shortest_edge), **kwargs, ) else: # warping (no cropping) when evaluated at 384 or larger return F.resize( image, (shortest_edge, shortest_edge), interpolation=interpolation, **kwargs, ) def _preprocess( self, images: List["torch.Tensor"], do_resize: bool, size: Dict[str, int], crop_pct: float, interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: int, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, List[float]]], image_std: Optional[Union[float, List[float]]], return_tensors: Optional[Union[str, TensorType]], ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize( image=stacked_images, size=size, crop_pct=crop_pct, interpolation=interpolation ) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["ConvNextImageProcessorFast"]
transformers/src/transformers/models/convnext/image_processing_convnext_fast.py/0
{ "file_path": "transformers/src/transformers/models/convnext/image_processing_convnext_fast.py", "repo_id": "transformers", "token_count": 3337 }
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Salesforce CTRL configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class CTRLConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. It is used to instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 246534): Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CTRLModel`] or [`TFCTRLModel`]. n_positions (`int`, *optional*, defaults to 256): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 1280): Dimensionality of the embeddings and hidden states. dff (`int`, *optional*, defaults to 8192): Dimensionality of the inner dimension of the feed forward networks (FFN). n_layer (`int`, *optional*, defaults to 48): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`int`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. layer_norm_epsilon (`float`, *optional*, defaults to 1e-06): The epsilon to use in the layer normalization layers initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Examples: ```python >>> from transformers import CTRLConfig, CTRLModel >>> # Initializing a CTRL configuration >>> configuration = CTRLConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = CTRLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "ctrl" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs, ): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.dff = dff self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache super().__init__(**kwargs) __all__ = ["CTRLConfig"]
transformers/src/transformers/models/ctrl/configuration_ctrl.py/0
{ "file_path": "transformers/src/transformers/models/ctrl/configuration_ctrl.py", "repo_id": "transformers", "token_count": 1722 }
# coding=utf-8 # Copyright 2024 Descript and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for DAC""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging logger = logging.get_logger(__name__) class DacFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a DAC feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used for padding. hop_length (`int`, *optional*, defaults to 512): Overlap length between successive windows. """ model_input_names = ["input_values", "n_quantizers"] def __init__( self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0, hop_length: int = 512, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.hop_length = hop_length def __call__( self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). 
truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*, default to 'pt'): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one.") elif padding is None: # by default let's pad the inputs padding = True is_batched = bool( isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) ) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and not isinstance(raw_audio, np.ndarray): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) # always return batch if not is_batched: raw_audio = [np.asarray(raw_audio).T] # verify inputs are valid for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") if self.feature_size == 2: raise ValueError("Stereo audio isn't supported for now") input_values = BatchFeature({"input_values": raw_audio}) # normal padding on batch padded_inputs = self.pad( input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=False, pad_to_multiple_of=self.hop_length, ) if padding: padded_inputs.input_values = padded_inputs.input_values[:, np.newaxis, :] input_values = [] for example in padded_inputs.pop("input_values"): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs["input_values"] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs __all__ = ["DacFeatureExtractor"]
transformers/src/transformers/models/dac/feature_extraction_dac.py/0
{ "file_path": "transformers/src/transformers/models/dac/feature_extraction_dac.py", "repo_id": "transformers", "token_count": 3213 }
# coding=utf-8 # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ErnieM model configuration""" # Adapted from original paddlenlp repository.(https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/transformers/ernie_m/configuration.py) from __future__ import annotations from typing import Dict from ....configuration_utils import PretrainedConfig class ErnieMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`ErnieMModel`]. It is used to instantiate an Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `Ernie-M` [susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 250002): Vocabulary size of `inputs_ids` in [`ErnieMModel`]. It is also the vocabulary size of the token embedding matrix. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ErnieMModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the embedding layer, encoder layers and pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are first projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically intermediate_size is larger than hidden_size. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other torch supported activation functions are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention target. max_position_embeddings (`int`, *optional*, defaults to 514): The maximum value of the dimensionality of position encoding, which dictates the maximum supported length of an input sequence. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the normal initializer for initializing all weight matrices. pad_token_id (`int`, *optional*, defaults to 1): The index of the padding token in the token vocabulary. 
layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. act_dropout (`float`, *optional*, defaults to 0.0): This dropout probability is used in `ErnieMEncoderLayer` after activation. A normal_initializer initializes weight matrices as normal distributions. See `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`. """ model_type = "ernie_m" attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, act_dropout=0.0, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.classifier_dropout = classifier_dropout self.act_dropout = act_dropout
transformers/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py", "repo_id": "transformers", "token_count": 2128 }
# coding=utf-8 # Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Jukebox model.""" import math import os from typing import List, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import nn from torch.nn import LayerNorm as FusedLayerNorm from ....activations import ACT2FN from ....modeling_utils import PreTrainedModel from ....utils import add_start_docstrings, logging from ....utils.logging import tqdm from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig logger = logging.get_logger(__name__) def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits (`torch.Tensor`): logits distribution shape (vocabulary size) top_k (`int`, *optional*, defaults to 0): When `top_k >0` keep only top key tokens with highest probability (top-k filtering). top_p (`int`, *optional*, defaults to 0): When `top_p>0.0` keep the top tokens with cumulative probability >= `top_p` (nucleus filtering). """ logits = logits.clone() top_k = min(top_k, logits.size(-1)) # Safety check if top_k > 0: # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < torch.topk(logits, top_k, dim=-1)[0][..., -1:] logits[indices_to_remove] = filter_value if top_p > 0.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold sorted_indices_to_remove = cumulative_probs > top_p # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # indices_to_remove = sorted_indices[sorted_indices_to_remove] indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_( dim=-1, index=sorted_indices, src=sorted_indices_to_remove ) logits[indices_to_remove] = filter_value return logits def get_relevant_lyric_tokens(full_tokens, max_n_lyric_tokens, total_length, offset, duration): """ Extract only the relevant tokens based on the character position. A total of `max_n_lyric_tokens` tokens will be returned. If the provided token sequence is smaller, it will be padded, otherwise, only characters ranging from the midpoint - `max_n_lyric_tokens//2` to the midpoint + `max_n_lyric_tokens//2` will be returned. This *focuses* on the most relevant tokens (in time) for the sequence. Args: full_tokens (`List[int]`): List containing the token ids of the entire lyrics. total_length (`int`): Total expected length of the music (not all of it is generated, see duration), in samples. offset (`int`): Starting sample in the music. 
If the offset is greater than 0, the lyrics will be shifted take that into account duration (`int`): Expected duration of the generated music, in samples. The duration has to be smaller than the total length, which represent the overall length of the signal, """ full_tokens = full_tokens[0] if len(full_tokens) < max_n_lyric_tokens: tokens = torch.cat( [torch.zeros(max_n_lyric_tokens - len(full_tokens), dtype=torch.long).to(full_tokens.device), full_tokens] ) indices = [-1] * (max_n_lyric_tokens - len(full_tokens)) + list(range(0, len(full_tokens))) else: midpoint = int(len(full_tokens) * (offset + duration / 2.0) / total_length) midpoint = min(max(midpoint, max_n_lyric_tokens // 2), len(full_tokens) - max_n_lyric_tokens // 2) tokens = full_tokens[midpoint - max_n_lyric_tokens // 2 : midpoint + max_n_lyric_tokens // 2] indices = list(range(midpoint - max_n_lyric_tokens // 2, midpoint + max_n_lyric_tokens // 2)) return tokens.unsqueeze(dim=0), indices # Break total_length into hops/windows of size n_ctx separated by hop_length def get_starts(total_length, n_ctx, hop_length): starts = [] for start in range(0, total_length - n_ctx + hop_length, hop_length): if start + n_ctx >= total_length: # Last hop could be smaller, we make it n_ctx to maximise context start = total_length - n_ctx starts.append(start) return starts def get_alignment(music_tokens, labels, prior, config): level = prior.levels - 1 # Top level used n_ctx = prior.n_ctx tokens = music_tokens[level] batch_size, total_length = tokens.shape[0], tokens.shape[1] if total_length < n_ctx: padding_length = n_ctx - total_length tokens = torch.cat( [tokens, torch.zeros(batch_size, n_ctx - total_length, dtype=tokens.dtype, device=tokens.device)], dim=1 ) total_length = tokens.shape[1] else: padding_length = 0 hop_length = int(config.hop_fraction[-level - 1] * prior.n_ctx) alignment_head, alignment_layer = config.prior_alignment_head[0], config.prior_alignment_layer[0] attn_layers = {alignment_layer} alignment_hops = {} indices_hops = {} for start in tqdm(get_starts(total_length, n_ctx, hop_length), desc="Computing lyric to music alignment "): end = start + n_ctx # set metadata offset, sample_length and lyrics tokens metadata, indices_hop = prior.get_metadata(labels, start, config.sample_length, get_indices=True, offset=0) tokens_bs = torch.chunk(tokens, batch_size, dim=0) metadata_bs = torch.chunk(metadata, batch_size, dim=0) w_hops = [] for tokens_i, metadata_i in zip(tokens_bs, metadata_bs): w_hop = prior.forward_tokens(tokens_i[:, start:end], [], metadata_i, get_attn_weights=attn_layers) w_hops.append(w_hop[0][:, alignment_head]) del w_hop weights = torch.cat(w_hops, dim=0) del w_hops alignment_hop = weights.float().cpu().numpy() del weights # alignment_hop has shape (bs, n_ctx, nb_relevant_lyric_tokens) # indices_hop is a list of len=bs, each entry of len hps.nb_relevant_lyric_tokens indices_hops[start] = indices_hop alignment_hops[start] = alignment_hop # Combine attn for each hop into attn for full range # Use indices to place them into correct place for corresponding source tokens alignments = [] for item in range(batch_size): # Note each item has different length lyrics full_tokens = labels[0, 3:] alignment = np.zeros((total_length, len(full_tokens) + 1)) for start in reversed(get_starts(total_length, n_ctx, hop_length)): end = start + n_ctx alignment_hop = alignment_hops[start][item] indices = indices_hops[start][item] alignment[start:end, indices] = alignment_hop alignment = alignment[: total_length - padding_length, :-1] # remove 
token padding, and last lyric index alignments.append(alignment) return alignments def save_temp_audio(fname, lvl, metas, aud): aud = torch.clamp(aud, -1, 1).cpu().numpy() for i in list(range(aud.shape[0])): if metas is not None: artists, genres, lyrics = list(metas)[i].values() path = f"{fname}/lvl_{lvl}-{artists}-{genres}-{lyrics[:5]}-{i}" np.save(path, aud[i]) else: np.save(f"{fname}/lvl_{lvl}-sample-{i}", aud[i]) def get_mask(mask, query_length, key_value_length, blocks, spread, device, sample, sample_t): # returns a mask of shape 1 x 1 x query_length x key_value_length or None if masking is not needed. if mask is None or query_length == 1: return None offset = sample_t - query_length if sample else max(key_value_length - query_length, 0) if mask == "autoregressive": # Masked dense mask = torch.ones(query_length, key_value_length, device=device).tril(offset) elif mask == "summary": # Masked summary mask = torch.ones(query_length, query_length, device=device).tril() mask = torch.ones(query_length, query_length, device=device).tril() mask = mask.view(query_length, blocks, query_length // blocks)[:, :-1, -key_value_length // blocks :] mask = ( torch.nn.functional.pad( mask, (0, 0, 1, 0), value=1, ) .contiguous() .view(query_length, key_value_length) ) elif mask == "prime": mask = torch.ones(query_length, key_value_length, device=device).tril(offset) return mask.view(1, 1, query_length, key_value_length) class JukeboxConv1D(nn.Module): def __init__(self, input_width, output_width): super().__init__() self.input_width = input_width self.output_width = output_width weight = torch.empty(input_width, output_width) bias = torch.zeros(output_width) self.weight = nn.Parameter(weight) self.bias = nn.Parameter(bias) def forward(self, hidden_states): size_out = (*hidden_states.size()[:-1], self.output_width) hidden_states = torch.addmm( self.bias.type_as(hidden_states), hidden_states.view(-1, hidden_states.size(-1)), self.weight.type_as(hidden_states), ) hidden_states = hidden_states.view(*size_out) return hidden_states class JukeboxResConv1DBlock(nn.Module): def __init__(self, config, conv_width, depth=1, res_scale=1.0): super().__init__() hidden_dim = config.res_convolution_multiplier * conv_width dilation = config.res_dilation_growth_rate**depth padding = dilation self.res_scale = res_scale self.activation = nn.ReLU() self.conv1d_1 = nn.Conv1d(conv_width, hidden_dim, 3, 1, padding, dilation) self.conv1d_2 = nn.Conv1d(hidden_dim, conv_width, 1, 1, 0) def forward(self, hidden_states): residuals = hidden_states hidden_states = self.activation(hidden_states) hidden_states = self.conv1d_1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv1d_2(hidden_states) return residuals + self.res_scale * hidden_states class JukeboxResnet1D(nn.Module): def __init__(self, config, conv_width, n_depth, reverse_dilation=False): super().__init__() self.dilation_cycle = config.res_dilation_cycle res_scale = 1.0 if not config.conv_res_scale else 1.0 / math.sqrt(n_depth) blocks = [] for depth in range(n_depth): block_depth = depth if self.dilation_cycle is None else depth % self.dilation_cycle blocks.append(JukeboxResConv1DBlock(config, conv_width, block_depth, res_scale)) if reverse_dilation: blocks = blocks[::-1] self.resnet_block = nn.ModuleList(blocks) def forward(self, hidden_states): for block in self.resnet_block: hidden_states = block(hidden_states) return hidden_states class JukeboxEncoderConvBlock(nn.Module): def __init__(self, config, embed_dim, hidden_dim, depth, down_t, 
stride_t): super().__init__() blocks = [] filter_t = stride_t * 2 pad_t = stride_t // 2 if down_t > 0: for i in range(down_t): blocks.append(nn.Conv1d(embed_dim if i == 0 else hidden_dim, hidden_dim, filter_t, stride_t, pad_t)) blocks.append(JukeboxResnet1D(config, hidden_dim, depth)) self.proj_out = nn.Conv1d(hidden_dim, config.embed_dim, 3, 1, 1) self.downsample_block = nn.ModuleList(blocks) def forward(self, hidden_states): for block in self.downsample_block: hidden_states = block(hidden_states) hidden_states = self.proj_out(hidden_states) return hidden_states class JukeboxEncoder(nn.Module): def __init__(self, config, width, depth, levels, downs_t, strides_t): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() iterator = zip(list(range(self.levels)), downs_t, strides_t) for i, down_t, stride_t in iterator: self.level_blocks.append( JukeboxEncoderConvBlock( config, config.conv_input_shape if i == 0 else config.embed_dim, width, depth, down_t, stride_t ) ) def forward(self, hidden_states): all_hidden_states = [] # 64, 32, ... for level in range(self.levels): level_block = self.level_blocks[level] hidden_states = level_block(hidden_states) all_hidden_states.append(hidden_states) return all_hidden_states class JukeboxDecoderConvBock(nn.Module): def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t, reverse_dilation=True): self.embed_dim = embed_dim self.hidden_dim = hidden_dim super().__init__() blocks = [] if down_t > 0: filter_t = stride_t * 2 pad_t = stride_t // 2 self.proj_in = nn.Conv1d(embed_dim, hidden_dim, 3, 1, 1) for i in range(down_t): blocks.append(JukeboxResnet1D(config, hidden_dim, depth, reverse_dilation)) blocks.append( nn.ConvTranspose1d( hidden_dim, hidden_dim if i < down_t - 1 else embed_dim, filter_t, stride_t, pad_t ) ) self.upsample_block = nn.ModuleList(blocks) def forward(self, hidden_states): hidden_states = self.proj_in(hidden_states) for block in self.upsample_block: hidden_states = block(hidden_states) return hidden_states class JukeboxDecoder(nn.Module): def __init__(self, config, hidden_dim, depth, levels, downs_t, strides_t): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() for level, down_t, stride_t in zip(list(range(self.levels)), downs_t, strides_t): self.level_blocks.append( JukeboxDecoderConvBock(config, config.embed_dim, hidden_dim, depth, down_t, stride_t) ) self.out = nn.Conv1d(config.embed_dim, config.conv_input_shape, 3, 1, 1) def forward(self, hidden_states, all_levels=True): hidden_state = hidden_states[-1] # 32, 64 ... 
for level in reversed(range(self.levels)): level_block = self.level_blocks[level] hidden_state = level_block(hidden_state) if level != 0 and all_levels: hidden_state = hidden_state + hidden_states[level - 1] hidden_state = self.out(hidden_state) return hidden_state class JukeboxBottleneckBlock(nn.Module): def __init__(self, config: JukeboxVQVAEConfig): super().__init__() self.nb_discrete_codes = config.nb_discrete_codes self.codebook_width = config.embed_dim self.mu = config.lmu self.threshold = 1.0 self.init = False self.codebook_sum = None self.codebook_elem = None self.register_buffer("codebook", torch.zeros(self.nb_discrete_codes, self.codebook_width)) def _tile(self, hidden_states): dim, embed_width = hidden_states.shape if dim < self.nb_discrete_codes: n_repeats = (self.nb_discrete_codes + dim - 1) // dim std = 0.01 / np.sqrt(embed_width) hidden_states = hidden_states.repeat(n_repeats, 1) hidden_states = hidden_states + torch.randn_like(hidden_states) * std return hidden_states def init_codebook(self, hidden_states): nb_discrete_codes = self.nb_discrete_codes self.init = True codes = self._tile(hidden_states) self.codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] self.codebook_sum = self.codebook self.codebook_elem = torch.ones(nb_discrete_codes, device=self.codebook.device) def update_codebook(self, hidden_states, latent_states): mu, codebook_width, nb_discrete_codes = self.mu, self.codebook_width, self.nb_discrete_codes with torch.no_grad(): # Calculate new centres # nb_discrete_codes, batch_size * seq_length latent_states_onehot = torch.zeros(nb_discrete_codes, hidden_states.shape[0], device=hidden_states.device) latent_states_onehot.scatter_(0, latent_states.view(1, hidden_states.shape[0]), 1) _codebook_sum = torch.matmul(latent_states_onehot, hidden_states) _codebook_elem = latent_states_onehot.sum(dim=-1) # nb_discrete_codes codes = self._tile(hidden_states) _random_codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] # Update centres old_codebook = self.codebook self.codebook_sum = mu * self.codebook_sum + (1.0 - mu) * _codebook_sum self.codebook_elem = mu * self.codebook_elem + (1.0 - mu) * _codebook_elem # nb_discrete_codes usage = (self.codebook_elem.view(nb_discrete_codes, 1) >= self.threshold).float() norm_code = self.codebook_sum.view(nb_discrete_codes, codebook_width) / self.codebook_elem.view( nb_discrete_codes, 1 ) self.codebook = usage * (norm_code) + (1 - usage) * _random_codebook _codebook_prob = _codebook_elem / torch.sum(_codebook_elem) # prob of each bin entropy = -torch.sum(_codebook_prob * torch.log(_codebook_prob + 1e-8)) # entropy ie how diverse used_curr = (_codebook_elem >= self.threshold).sum() usage = torch.sum(usage) dk = torch.norm(self.codebook - old_codebook) / np.sqrt(np.prod(old_codebook.shape)) return {"entropy": entropy, "used_curr": used_curr, "usage": usage, "dk": dk} def preprocess(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1).contiguous() hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) if hidden_states.shape[-1] == self.codebook_width: prenorm = torch.norm(hidden_states - torch.mean(hidden_states)) / np.sqrt(np.prod(hidden_states.shape)) elif hidden_states.shape[-1] == 2 * self.codebook_width: x1, x2 = hidden_states[..., : self.codebook_width], hidden_states[..., self.codebook_width :] prenorm = (torch.norm(x1 - torch.mean(x1)) / np.sqrt(np.prod(x1.shape))) + ( torch.norm(x2 - torch.mean(x2)) / np.sqrt(np.prod(x2.shape)) ) # Normalise hidden_states = x1 + x2 return 
hidden_states, prenorm def postprocess(self, latent_states, dequantised_states, x_shape): batch_size, time = x_shape dequantised_states = dequantised_states.view(batch_size, time, -1).permute(0, 2, 1).contiguous() latent_states = latent_states.view(batch_size, time) return latent_states, dequantised_states def quantise(self, latent_states): # Calculate latent code latent_states codebook_weights = self.codebook.t() distance = ( torch.sum(latent_states**2, dim=-1, keepdim=True) - 2 * torch.matmul(latent_states, codebook_weights) + torch.sum(codebook_weights**2, dim=0, keepdim=True) ) # (batch_size * latent_states , codebook_weights) min_distance, music_tokens = torch.min(distance, dim=-1) fit = torch.mean(min_distance) return music_tokens, fit def dequantise(self, music_tokens): dequantised_states = F.embedding(music_tokens, self.codebook) return dequantised_states def encode(self, latent_states): samples, _, seq_len = latent_states.shape # Preprocess. latent_states, _ = self.preprocess(latent_states) # Quantise music_tokens, _ = self.quantise(latent_states) # Postprocess. music_tokens = music_tokens.view(samples, seq_len) return music_tokens def decode(self, music_tokens): samples, seq_len = music_tokens.shape # Dequantise dequantised_states = self.dequantise(music_tokens) # Postprocess dequantised_states = ( dequantised_states.view(samples, seq_len, self.codebook_width).permute(0, 2, 1).contiguous() ) return dequantised_states def forward(self, hidden_states, update_codebook=True): samples, _, seq_len = hidden_states.shape # Preprocess hidden_states, prenorm = self.preprocess(hidden_states) # Init codebook if not inited if update_codebook and not self.init: self.init_codebook(hidden_states) # Quantise and dequantise through bottleneck music_tokens, fit = self.quantise(hidden_states) dequantised_states = self.dequantise(music_tokens) # Update embeddings if update_codebook: update_metrics = self.update_codebook(hidden_states, music_tokens) else: update_metrics = {} # Loss commit_loss = torch.norm(dequantised_states.detach() - hidden_states) ** 2 / np.prod(hidden_states.shape) # Passthrough dequantised_states = hidden_states + (dequantised_states - hidden_states).detach() # Postprocess music_tokens, dequantised_states = self.postprocess(music_tokens, dequantised_states, (samples, seq_len)) return music_tokens, dequantised_states, commit_loss, dict(fit=fit, pn=prenorm, **update_metrics) class JukeboxBottleneck(nn.Module): def __init__(self, config, levels): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() for level in range(self.levels): self.level_blocks.append(JukeboxBottleneckBlock(config)) def encode(self, raw_audio): music_tokens = [ level_block.encode(hidden_states) for (level_block, hidden_states) in zip(self.level_blocks, raw_audio) ] return music_tokens def decode(self, music_tokens, start_level=0, end_level=None): if end_level is None: end_level = self.levels quantised_audio = [ level_block.decode(z) for (level_block, z) in zip(self.level_blocks[start_level:end_level], music_tokens) ] return quantised_audio def forward(self, input_audio): music_tokens, quantised_states, commit_losses, metrics = [], [], [], [] for level in range(self.levels): level_block = self.level_blocks[-level - 1] hidden_states = input_audio[level] sampled_tokens, quantised_state, commit_loss, metric = level_block( hidden_states, update_codebook=self.training ) music_tokens.append(sampled_tokens) if not self.training: # Be extra paranoid and make sure the encoder weights can't # change 
from straight-through estimator quantised_state = quantised_state.detach() quantised_states.append(quantised_state) commit_losses.append(commit_loss) if self.training: metrics.append(metric) return music_tokens, quantised_states, commit_losses, metrics JUKEBOX_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (`JukeboxConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( """The Hierarchical VQ-VAE model used in Jukebox. This model follows the Hierarchical VQVAE paper from [Will Williams, Sam Ringer, Tom Ash, John Hughes, David MacLeod, Jamie Dougherty](https://arxiv.org/abs/2002.08111). """, JUKEBOX_START_DOCSTRING, ) class JukeboxVQVAE(PreTrainedModel): config_class = JukeboxVQVAEConfig base_model_prefix = "vqvae" def _init_weights(self, module): if isinstance(module, nn.Embedding): # embed_tokens module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) elif isinstance(module, JukeboxConv1D): if self.config.zero_out: module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: module.conv1d_2.weight.data.zero_() module.conv1d_2.bias.data.zero_() if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def __init__(self, config: JukeboxVQVAEConfig): super().__init__(config) downs_t = config.res_downs_t strides_t = config.res_strides_t if not config.sample_length: downsamples = [stride**down for stride, down in zip(strides_t, downs_t)] top_raw_to_tokens = np.prod(downsamples) config.sample_length = ( config.sample_length_in_seconds * config.sampling_rate // top_raw_to_tokens ) * top_raw_to_tokens config.sample_length = config.sample_length.astype(int) self.nb_discrete_codes = config.nb_discrete_codes self.commit = config.commit self.sample_length = config.sample_length self.downsamples = [stride**down for stride, down in zip(strides_t, downs_t)] self.hop_lengths = np.cumprod(self.downsamples) self.levels = levels = config.levels self.music_tokens_shapes = [ (int(self.sample_length // self.hop_lengths[-level - 1])) for level in range(levels) ] self.multipliers = config.multipliers if config.multipliers is not None else [1] * levels self.encoders = nn.ModuleList() self.decoders = nn.ModuleList() for level in range(levels): width = config.res_conv_width * self.multipliers[level] depth = config.res_conv_depth * self.multipliers[level] self.encoders.append( JukeboxEncoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1]) ) self.decoders.append( JukeboxDecoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1]) ) self.bottleneck = JukeboxBottleneck(config, levels) def _decode(self, music_tokens, 
start_level=0, end_level=None): # Decode if end_level is None: end_level = self.levels latent_states = self.bottleneck.decode(music_tokens, start_level=start_level, end_level=end_level) # Use only lowest level decoder, dequantised_state = self.decoders[start_level], latent_states[0:1] dequantised_state = decoder(dequantised_state, all_levels=False) dequantised_state = dequantised_state.permute(0, 2, 1) return dequantised_state def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor: """ Transforms the input `music_tokens` to their `raw_audio` representation. Args: music_tokens (`torch.LongTensor`): Tensor of music tokens which will be decoded to raw audio by using the codebook. Each music token should be an index to a corresponding `code` vector in the codebook. start_level (`int`, *optional*): Level at which the decoding process will start. Default to 0. end_level (`int`, *optional*): Level at which the decoding process will start. Default to None. bs_chunks (int, *optional*): Number of chunks to process at the same time. """ token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens] dequantised_states = [] for i in range(bs_chunks): music_tokens_i = [chunks[i] for chunks in token_chunks] dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level) dequantised_states.append(dequantised_state) return torch.cat(dequantised_states, dim=0) def _encode(self, raw_audio, start_level=0, end_level=None): # Encode if end_level is None: end_level = self.levels input_audio = raw_audio.permute(0, 2, 1).float() latent_states = [] for level in range(self.levels): encoder = self.encoders[level] latent_state = encoder(input_audio) latent_states.append(latent_state[-1]) music_tokens = self.bottleneck.encode(latent_states) return music_tokens[start_level:end_level] def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): """ Transforms the `input_audio` to a discrete representation made out of `music_tokens`. Args: input_audio (`torch.Tensor`): Raw audio which will be encoded to its discrete representation using the codebook. The closest `code` form the codebook will be computed for each sequence of samples. start_level (`int`, *optional*, defaults to 0): Level at which the encoding process will start. Default to 0. end_level (`int`, *optional*): Level at which the encoding process will start. Default to None. bs_chunks (int, *optional*, defaults to 1): Number of chunks of raw audio to process at the same time. """ audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0) music_tokens_list = [] for chunk_i in audio_chunks: music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level) music_tokens_list.append(music_tokens_i) music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)] return music_tokens def sample(self, n_samples): music_tokens = [ torch.randint(0, self.nb_discrete_codes, size=(n_samples, *music_tokens_shape), device="cpu") for music_tokens_shape in self.music_tokens_shapes ] return self.decode(music_tokens) def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level. The commit loss, which ensure that the encoder's computed embeddings are close to the codebook vectors, is computed. Args: raw_audio (`torch.FloatTensor`): Audio input which will be encoded and decoded. 
Returns: `Tuple[torch.Tensor, torch.Tensor]` Example: ```python >>> from transformers import JukeboxVQVAE, set_seed >>> import torch >>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval() >>> set_seed(0) >>> zs = [torch.randint(100, (4, 1))] >>> model.decode(zs).shape torch.Size([4, 8, 1]) ``` """ # Encode/Decode input_audio = raw_audio.permute(0, 2, 1).float() latent_states = [] for level in range(self.levels): encoder = self.encoders[level] latent_state = encoder(input_audio) latent_states.append(latent_state[-1]) _, music_tokens, commit_losses, _ = self.bottleneck(latent_states) dequantised_states = [] for level in range(self.levels): decoder = self.decoders[level] dequantised_state = decoder(music_tokens[level : level + 1], all_levels=False) dequantised_states.append(dequantised_state.permute(0, 2, 1)) commit_loss = sum(commit_losses) loss = self.commit * commit_loss return dequantised_states, loss class JukeboxMLP(nn.Module): def __init__(self, config): # a single channel is always used in original code super().__init__() embed_dim = config.hidden_size hidden_dim = int(config.mlp_multiplier * embed_dim) self.c_fc = JukeboxConv1D(embed_dim, hidden_dim) self.c_proj = JukeboxConv1D(hidden_dim, embed_dim) self.act = ACT2FN[config.act_fn] self.dropout = nn.Dropout(config.resid_dropout) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class JukeboxLayerNorm(FusedLayerNorm): def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True): super().__init__(normalized_shape, eps=eps, elementwise_affine=elementwise_affine) self.width = np.prod(normalized_shape) self.max_numel = 65535 * self.width def forward(self, input): if input.numel() > self.max_numel: return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps).type_as(input) else: return super().forward(input).type_as(input) class JukeboxAttention(nn.Module): def __init__(self, config, n_ctx, attn_func="dense_attn"): super().__init__() self.embed_dim = config.hidden_size self.n_heads = config.n_heads self.dropout = config.attn_dropout hidden_dim = int(config.attention_multiplier * self.embed_dim) self.head_dim = hidden_dim // config.n_heads self.n_ctx = n_ctx self.hidden_dim = hidden_dim self.scale = self.head_dim**-0.25 self.mask = config.mask if attn_func == "cross_attention": self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim) self.c_enc_kv = JukeboxConv1D(self.embed_dim, hidden_dim * 2) else: self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim * 3) self.c_proj = JukeboxConv1D(hidden_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_dropout) self.resid_dropout = nn.Dropout(config.resid_dropout) # Sequence of length seq_len is factored as [blocks, seq_len // blocks] self.attn_func = attn_func if attn_func == "cross_attention": self.qkv = self.decode_qkv elif attn_func == "prime_attn": self.qkv = self.prime_qkv else: self.qkv = self.factored_qkv ATTENTION_MAP = { "dense_attn": (self.dense_attn, "autoregressive"), "block_attn": (self.block_attn, "autoregressive"), "transpose_block_attn": (self.transpose_block_attn, "autoregressive"), "prev_block_attn": (self.prev_block_attn, None), "summary_attn": (self.summary_attn, "summary"), "summary_spread_attn": (self.summary_spread_attn, "summary"), "cross_attention": (self.dense_attn, None), "prime_attn": (self.prime_attn, "prime"), } self.attn, 
self.attn_mask = ATTENTION_MAP[attn_func] self.blocks = config.blocks self.spread = config.spread if self.blocks is not None: self.block_ctx = self.n_ctx // self.blocks self.sample_t = 0 self.cache = {} self.encoder_len = config.nb_relevant_lyric_tokens # length of the encoder input ids self.record_attn = False def _attn(self, query_states, key_states, value_states, sample): scale = self.scale if self.training: attention_weight = torch.matmul(query_states * scale, key_states * scale) else: attention_weight = torch.matmul(query_states, key_states) attention_weight.mul_(scale * scale) attn_weight_type = attention_weight.dtype attention_weight = attention_weight.float() if self.mask: # Generate appropriate mask to mask out all positions before current # Might take up lot of memory for dense, so can cache it mask = get_mask( self.attn_mask, query_states.size(-2), key_states.size(-1), self.blocks, self.spread, attention_weight.device, sample, self.sample_t, ) if mask is not None: attention_weight = attention_weight * mask + -1e9 * (1 - mask) attention_prob = F.softmax(attention_weight, dim=-1).type(attn_weight_type) if self.record_attn: self.attention_prob = attention_prob if self.attn_func == "prime_attn": # only keep music queries and lyrics keys/values self.attention_prob = self.attention_prob[:, :, self.encoder_len :, : self.encoder_len] attention_prob = self.attn_dropout(attention_prob) context_states = torch.matmul(attention_prob, value_states) return context_states def merge_heads(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous() new_hidden_states_shape = (*hidden_states.size()[:-2], hidden_states.size(-2) * hidden_states.size(-1)) return hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct merge_states def split_heads(self, hidden_states, is_key=False): new_hidden_states_shape = ( *hidden_states.size()[:-1], self.n_heads, hidden_states.size(-1) // self.n_heads, ) hidden_states = hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct split_states if is_key: return hidden_states.permute(0, 2, 3, 1) else: return hidden_states.permute(0, 2, 1, 3) def dense_attn(self, query, key, value, sample): query = self.split_heads(query) key = self.split_heads(key, is_key=True) value = self.split_heads(value) context_states = self._attn(query, key, value, sample) context_states = self.merge_heads(context_states) return context_states def block_attn(self, query, key, value, sample): block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) if query_length < seq_len: seq_len = query_length key = key[:, -seq_len:].contiguous() value = value[:, -seq_len:].contiguous() key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def transpose_block_attn(self, query, key, value, sample): block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: block_len = (seq_len - 1) % block_ctx key = key[:, block_len::block_ctx, :] value = value[:, block_len::block_ctx, :] return self.dense_attn(query, key, value, 
sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size, query_length // block_ctx, block_ctx, embed_dim) query = query.transpose(1, 2).contiguous() query = query.view(batch_size * block_ctx, query_length // block_ctx, embed_dim) key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) key = key.transpose(1, 2).contiguous() key = key.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) value = value.transpose(1, 2).contiguous() value = value.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) block_attn = self.dense_attn(query, key, value, sample) block_attn = block_attn.view(batch_size, block_ctx, query_length // block_ctx, embed_dim) block_attn = block_attn.transpose(1, 2).contiguous() block_attn = block_attn.view(batch_size, query_length, embed_dim) return block_attn def prev_block_attn(self, query, key, value, sample): block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: block = (seq_len - 1) // block_ctx prev_l = (block - 1) * block_ctx if block > 0: key = key[:, prev_l : prev_l + block_ctx, :] value = value[:, prev_l : prev_l + block_ctx, :] else: key = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) value = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)) key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)) value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) if query_length < seq_len: nb_query_blocks = query_length // block_ctx nb_key_blocks = seq_len // block_ctx seq_len = query_length key = key.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] key = key.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) value = value.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] value = value.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def summary_attn(self, query, key, value, sample): blocks = self.blocks block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: key = key[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :] key = torch.nn.functional.pad(key, (0, 0, 1, 0)) value = value[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :] value = torch.nn.functional.pad(value, (0, 0, 1, 0)) return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] key = torch.nn.functional.pad(key, (0, 0, 1, 0)) # batch_size, blocks, embed_dim value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] value = torch.nn.functional.pad(value, (0, 0, 1, 0)) # batch_size, 
blocks, embed_dim return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def summary_spread_attn(self, query, key, value, sample): blocks = self.blocks spread = self.spread batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: raise NotImplementedError else: key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)).contiguous() key = key.view(batch_size, blocks * spread, embed_dim) value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)).contiguous() value = value.view(batch_size, blocks * spread, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def prime_attn(self, query, key, value, sample): encoder_len = self._encoder_len key = key[:, :encoder_len] value = value[:, :encoder_len] return self.dense_attn(query, key, value, sample) def factored_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] if last_encoder_hidden_states is not None: raise TypeError("last_encoder_hidden_states should be None") query, key, value = hidden_states.chunk(3, dim=2) if sample: self.sample_t += curr_ctx key, value = self._append_cache(key, value) l_cache = self._suff_cache_len() if self._cache_len() > l_cache: self._slice_cache(-l_cache) if curr_ctx > 1: if self.attn_func != "dense_attn": query = self._pad_to_block_ctx(query, query=True) key = self._pad_to_block_ctx(key) value = self._pad_to_block_ctx(value) sample = False else: key = self.cache["key"] value = self.cache["value"] return query, key, value, sample def prime_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] if last_encoder_hidden_states is not None: raise TypeError("last_encoder_hidden_states should be None") query, key, value = hidden_states.chunk(3, dim=2) if sample: if self._cache_len() < self._encoder_len: self._append_cache(key, value) if self._cache_len() > self._encoder_len: self._slice_cache(0, self._encoder_len) key, value = self.cache["key"], self.cache["value"] self.sample_t += curr_ctx return query, key, value, sample def decode_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] query = hidden_states if sample: if self.sample_t == 0: self.cache["key"], self.cache["value"] = self.c_enc_kv( last_encoder_hidden_states.type_as(hidden_states) ).chunk(2, dim=2) key, value = self.cache["key"], self.cache["value"] self.sample_t += curr_ctx else: key, value = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2) return query, key, value, sample def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] hidden_states = self.c_attn(hidden_states) query, key, value, sample = self.qkv( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample ) attention_scores = self.attn(query, key, value, sample) if attention_scores.shape[1] != curr_ctx: offset = self._offset(curr_ctx) attention_scores = attention_scores[:, offset : offset + curr_ctx, :].contiguous() attention_scores = self.c_proj(attention_scores) return self.resid_dropout(attention_scores) @property def _encoder_len(self): encoder_len = self.encoder_len encoder_blocks = (encoder_len // self.blocks) + 1 return 
encoder_blocks * self.blocks def _offset(self, curr_ctx): if self.attn_func == "dense_attn": return 0 return (self.sample_t - curr_ctx) % self.block_ctx def _pad_to_block_ctx(self, hidden_states, query=False): seq_len = hidden_states.shape[1] offset = self._offset(seq_len) if query else 0 n_blocks = (seq_len + offset + self.block_ctx - 1) // self.block_ctx pad = n_blocks * self.block_ctx - seq_len - offset if pad == 0 and offset == 0: return hidden_states else: return F.pad(hidden_states, (0, 0, offset, pad)) def _cache_len(self): return 0 if "key" not in self.cache else self.cache["key"].shape[1] def _suff_cache_len(self): """ Precondition: key and value are appended with the current context and self.sample_t reflects the 1-indexed sample location in the context. """ previous_block_length = (self.sample_t - 1) % self.block_ctx + 1 + self.block_ctx REQUIRED_CACHE_LEN = { "dense_attn": self.sample_t, "block_attn": (self.sample_t - 1) % self.block_ctx + 1, "transpose_block_attn": self.sample_t, "prev_block_attn": self.sample_t if self.sample_t <= self.block_ctx else previous_block_length, "cross_attn": self.encoder_len, "prime_attn": min(self.sample_t, self._encoder_len), } return REQUIRED_CACHE_LEN[self.attn_func] def _slice_cache(self, start, end=None): self.cache["key"] = self.cache["key"][:, start:end] self.cache["value"] = self.cache["value"][:, start:end] def _append_cache(self, key, value): if "key" not in self.cache: self.cache["key"] = key self.cache["value"] = value else: old_key, old_value = key, value key = torch.cat([self.cache["key"], old_key], dim=1) value = torch.cat([self.cache["value"], old_value], dim=1) del self.cache["key"] del self.cache["value"] del old_key del old_value self.cache["key"] = key self.cache["value"] = value return self.cache["key"], self.cache["value"] def del_cache(self): self.sample_t = 0 if "key" in self.cache: del self.cache["key"] if "value" in self.cache: del self.cache["value"] self.cache = {} class JukeboxBlock(nn.Module): def __init__(self, config, n_ctx, attn_func="dense_attn"): super().__init__() self.width = config.hidden_size self.attn = JukeboxAttention(config, n_ctx, attn_func=attn_func) self.layer_norm_0 = JukeboxLayerNorm(config.hidden_size) self.mlp = JukeboxMLP(config) self.layer_norm_1 = JukeboxLayerNorm(config.hidden_size) self.res_scale = 1.0 / config.num_layers if config.attn_res_scale else 1.0 self.attn_func = attn_func def forward(self, hidden_states, last_encoder_hidden_states, sample=False): residuals = hidden_states hidden_states = self.layer_norm_0(hidden_states) hidden_states = self.attn(hidden_states, last_encoder_hidden_states, sample) output_states = self.layer_norm_1(residuals + hidden_states) output_states = self.mlp(output_states) if self.res_scale == 1.0: output = residuals + hidden_states + output_states else: output = residuals + self.res_scale * (hidden_states + output_states) return output class JukeboxLayerStack(nn.Module): def __init__(self, config, n_ctx): super().__init__() self.n_ctx = n_ctx self.width = config.hidden_size self.num_layers = config.num_layers self.blocks = config.blocks self.attention_pattern = config.attention_pattern if self.blocks is not None: self.block_ctx = n_ctx // self.blocks self.encoder_len = config.nb_relevant_lyric_tokens self.n_heads = config.n_heads # Orders of attn_func attention_pattern = ATTENTION_PATTERNS[self.attention_pattern] self._attn_mods = nn.ModuleList() for depth in range(self.num_layers): self._attn_mods.append(JukeboxBlock(config, n_ctx, 
attn_func=attention_pattern(depth))) self.saved_attn_weights = [] def set_record_attn(self, record_attn): """ Makes forward prop dump self-attention softmaxes to self.saved_attn_weights. Args: record_attn (`Union[bool,set]`): Either a set of layer indices indicating which layers to store, or a boolean value indicating Whether to dump all. """ def _should_record_attn(layer_idx): if isinstance(record_attn, bool): return record_attn return layer_idx in record_attn for i, layer in enumerate(self._attn_mods): layer.attn.record_attn = _should_record_attn(i) if not record_attn: self.saved_attn_weights = [] def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): # Blocks for i, attn_layer in enumerate(self._attn_mods): if attn_layer.attn_func == "cross_attention": # attend to the lyrics hidden_states = attn_layer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample ) else: hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=None, sample=sample) if attn_layer.attn.record_attn: self.saved_attn_weights.append(attn_layer.attn.c_attn.weight) return hidden_states def del_cache(self): for attn_layer in self._attn_mods: attn_layer.attn.del_cache() class JukeboxPositionalEmbedding(nn.Module): def __init__(self, embed_dim, width): super().__init__() self.pos_emb = nn.Parameter(torch.empty((embed_dim, width))) def forward(self): pos_emb = self.pos_emb return pos_emb class JukeboxConditionalAutoregressive(nn.Module): def __init__( self, config, n_ctx=None, embed_dim=None, audio_conditioning=False, metadata_conditioning=False, is_encoder=False, ): """ Autoregressive model on either lyric tokens or music tokens, or both. The attention pattern should be properly set fro each configuration. Args: config (`JukeboxPriorConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. n_ctx (`int`, *optional*): Number of tokens or lyrics tokens provided in a single pass. embed_dim (`int`, *optional*): Either equals to the dimension of the codebook, or the sum of n_vocab (lyrics) and codeboook dimension, if the model combines lyrics and music tokens, or simply n_vocab if the model is a seperate encoder audio_conditioning (`bool`, *optional*, defaults to `False`): Whether or not the prior supports conditionning on audio. metadata_conditioning (`bool`, *optional*, defaults to `False`): Whether or not the prior supports conditionning on artitst, genres, lyrics and timing. is_encoder (`bool`, *optional*, defaults to `False`): Whether the model is an encoder only model. 
""" super().__init__() self.width = config.hidden_size self.num_layers = config.num_layers self.n_ctx = n_ctx if n_ctx is not None else config.n_ctx self.embed_dim = embed_dim if embed_dim is not None else config.music_vocab_size self.embed_tokens = nn.Embedding(self.embed_dim, config.hidden_size) self.embed_tokens_dropout = nn.Dropout(config.emb_dropout) self.metadata_conditioning = metadata_conditioning self.audio_conditioning = audio_conditioning if not metadata_conditioning: self.start_token = nn.Parameter(torch.empty((1, config.hidden_size))) self.pos_emb = JukeboxPositionalEmbedding(self.n_ctx, config.hidden_size) self.pos_emb_dropout = nn.Dropout(config.emb_dropout) self.transformer = JukeboxLayerStack(config, n_ctx=self.n_ctx) self.is_encoder = is_encoder self.encoder_len = config.nb_relevant_lyric_tokens if config.merged_decoder: # Merged piped model uses this setup self.add_cond_after_transformer = False self.share_embed_tokens_fc_proj_out = False else: self.add_cond_after_transformer = True self.share_embed_tokens_fc_proj_out = True if not is_encoder: self.fc_proj_out = nn.Linear(config.hidden_size, self.embed_dim, bias=False) if self.share_embed_tokens_fc_proj_out: self.fc_proj_out.weight = self.embed_tokens.weight self.loss = torch.nn.CrossEntropyLoss() def forward( self, tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, get_preds=False, get_acts=False, get_sep_loss=False, ): """ Args: tokens (`torch.tensor`): Can represent music tokens, lyrics tokens or both, depending on the configuration. """ # Preprocess. batch_size = tokens.shape[0] with torch.no_grad(): tokens = tokens.view(batch_size, -1).long() if not self.audio_conditioning: audio_conditioning = torch.zeros( (batch_size, 1, self.width), device=tokens.device, dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype, ) target = tokens # Target hidden_states = self.embed_tokens(tokens) # Shift by 1, and fill in start token hidden_states = torch.cat((hidden_states[:, -1:], hidden_states[:, :-1]), dim=1) if self.metadata_conditioning: hidden_states[:, 0] = metadata_conditioning.view(batch_size, self.width) else: hidden_states[:, 0] = self.start_token hidden_states = ( self.embed_tokens_dropout(hidden_states) + self.pos_emb_dropout(self.pos_emb()) + audio_conditioning ) # Pos emb and dropout hidden_states = self.transformer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states ) # Transformer if self.add_cond_after_transformer: # Piped doesnt add x_cond hidden_states = hidden_states + audio_conditioning activations = hidden_states if self.is_encoder: return hidden_states hidden_states = self.fc_proj_out(hidden_states) # Predictions loss_fn = nn.CrossEntropyLoss() if get_sep_loss: lyric_hidden_states = hidden_states[:, : self.encoder_len].reshape(-1, self.embed_dim) token_hidden_states = hidden_states[:, self.encoder_len :].reshape(-1, self.embed_dim) lyric_loss = loss_fn(lyric_hidden_states, target[:, : self.encoder_len].reshape(-1)) / np.log(2.0) music_token_loss = loss_fn(token_hidden_states, target[:, self.encoder_len :].reshape(-1)) / np.log(2.0) loss = (lyric_loss, music_token_loss) # Note order! 
Lyric is first else: loss = loss_fn(hidden_states.view(-1, self.embed_dim), target.view(-1)) / np.log(2.0) # Loss if get_preds: return loss, hidden_states elif get_acts: return loss, activations else: return loss, None def get_emb(self, sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning): if sample_t == 0: hidden_states = torch.empty(n_samples, 1, self.width, dtype=self.embed_tokens.weight.dtype).to( self.embed_tokens.weight.device ) if self.metadata_conditioning: hidden_states[:, 0] = metadata_conditioning.view(n_samples, self.width) else: hidden_states[:, 0] = self.start_token else: hidden_states = self.embed_tokens(tokens) if audio_conditioning.shape == (n_samples, self.n_ctx, self.width): cond = audio_conditioning[:, sample_t : sample_t + 1, :] else: cond = audio_conditioning # Pos emb, dropout is identity at eval time hidden_states = hidden_states + self.pos_emb()[sample_t : sample_t + 1] + cond return hidden_states, cond def sample( self, n_samples, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, sample_tokens=None, ): if sample_tokens is None: sample_tokens = self.n_ctx if not self.audio_conditioning: audio_conditioning = torch.zeros( (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype ).to(self.fc_proj_out.device) with torch.no_grad(): sampled_tokens = [] tokens = None if get_preds: preds = [] iter = tqdm(range(0, sample_tokens), leave=False) for sample_t in iter: iter.set_description(f"Ancestral sampling {sample_tokens} music tokens", refresh=True) hidden_states, cond = self.get_emb( sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning ) hidden_states = self.transformer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True ) if self.add_cond_after_transformer: hidden_states = hidden_states + cond hidden_states = self.fc_proj_out(hidden_states) # Predictions if get_preds: preds.append(hidden_states.clone()) # Adjust logits hidden_states = hidden_states / temp hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) # Sample and replace hidden_states tokens = torch.distributions.Categorical(logits=hidden_states).sample() sampled_tokens.append(tokens.clone()) del tokens self.transformer.del_cache() tokens = torch.cat(sampled_tokens, dim=1) if get_preds: preds = torch.cat(preds, dim=1) if get_preds: return tokens, preds else: return tokens def split_chunks(self, length, chunk_size): n_passes = (length + chunk_size - 1) // chunk_size chunk_sizes = [*[chunk_size] * (n_passes - 1), (length - 1) % chunk_size + 1] return chunk_sizes def primed_sample( self, n_samples, lyric_and_music_tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, chunk_size=None, sample_tokens=None, ): if sample_tokens is None: sample_tokens = self.n_ctx # Preprocess. batch_size = lyric_and_music_tokens.shape[0] with torch.no_grad(): lyric_and_music_tokens = lyric_and_music_tokens.view(batch_size, -1).long() sampled_audio = torch.split(lyric_and_music_tokens, 1, dim=1) sampled_audio = list(sampled_audio) if not self.audio_conditioning: audio_conditioning = torch.zeros( (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype ).to(lyric_and_music_tokens.device) with torch.no_grad(): if get_preds: preds = [] # Fill up key/value cache for past context by runing forward pass. 
# We do so in chunks instead of doing the whole past in one forward pass to reduce max memory usage. if chunk_size is None: chunk_size = len(sampled_audio) chunk_sizes = self.split_chunks(len(sampled_audio), chunk_size) x_primes = [] start = 0 token = None for current_chunk_size in tqdm(chunk_sizes, desc="Preparing past key value", leave=False): sampled_audio_prime, conds_prime = [], [] for sample_t in range(start, start + current_chunk_size): x_prime, cond_prime = self.get_emb( sample_t, n_samples, token, audio_conditioning, metadata_conditioning ) token = sampled_audio[sample_t] sampled_audio_prime.append(x_prime) conds_prime.append(cond_prime) start = start + current_chunk_size x_prime, cond_prime = torch.cat(sampled_audio_prime, dim=1), torch.cat(conds_prime, dim=1) del sampled_audio_prime del conds_prime if not get_preds: del cond_prime x_prime = self.transformer(x_prime, last_encoder_hidden_states=last_encoder_hidden_states, sample=True) if get_preds: if self.add_cond_after_transformer: x_prime = x_prime + cond_prime del cond_prime x_primes.append(x_prime) else: del x_prime if get_preds: x_prime = torch.cat(x_primes, dim=1) x_prime = self.fc_proj_out(x_prime) # Predictions preds.append(x_prime) # the input of the encoder and decoder can be merged into (lyrics, music tokens) input_tokens = sampled_audio[-1] itererator = tqdm( range(len(sampled_audio), sample_tokens), desc=f"Sampling {len(range(len(sampled_audio), sample_tokens))} music tokens", leave=False, ) for sample_t in itererator: hidden_states, cond = self.get_emb( sample_t, n_samples, input_tokens, audio_conditioning, metadata_conditioning ) hidden_states = self.transformer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True ) if self.add_cond_after_transformer: hidden_states = hidden_states + cond hidden_states = self.fc_proj_out(hidden_states) # Predictions if get_preds: preds.append(hidden_states) # Adjust logits hidden_states = hidden_states / temp hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) # only music tokens are sampled music_tokens = torch.distributions.Categorical(logits=hidden_states).sample() sampled_audio.append(music_tokens.clone()) input_tokens = music_tokens del input_tokens, music_tokens self.transformer.del_cache() music_tokens = torch.cat(sampled_audio, dim=1) if get_preds: preds = torch.cat(preds, dim=1) if get_preds: return music_tokens, preds else: return music_tokens class JukeboxMusicTokenConditioner(nn.Module): """ The `JukeboxMusicTokenConditioner` takes music tokens as an input (coresponding to the codes of the VQVAE's codebook) and upsamples it using a single layer of decoder convolution block (the same is used in the VQVAE). 
""" def __init__(self, config, level): super().__init__() self.embed_tokens = nn.Embedding(config.music_vocab_size, config.hidden_size) config.embed_dim = config.music_vocab_size # setting correct argument for the `JukeboxDecoder` self.upsampler = JukeboxDecoderConvBock( config, config.hidden_size, config.res_conv_width, config.res_conv_depth, config.res_downs_t[level], config.res_strides_t[level], reverse_dilation=False, ) self.layer_norm = JukeboxLayerNorm(config.hidden_size) def forward(self, music_tokens, raw_audio_conditionning=None): """ Args: music_tokens (`torch.LongTensor`): Music tokens form the uper level in range(nb_discrete_codes) raw_audio_conditionning (`torch.LongTensor`, *optional*): Audio used when primed sampling, raw audio information that conditions the generation """ if raw_audio_conditionning is None: raw_audio_conditionning = 0.0 # Embed music_tokens music_tokens = music_tokens.long() hidden_states = self.embed_tokens(music_tokens) hidden_states = hidden_states + raw_audio_conditionning # Run conditioner hidden_states = hidden_states.permute(0, 2, 1) hidden_states = self.upsampler(hidden_states) hidden_states = hidden_states.permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) return hidden_states class JukeboxRangeEmbedding(nn.Module): """ The `JukeboxRangeEmbedding` interpolate the given [pos_start, pos_end] to obtain an equivalent of time positional embedding of length `n_ctx`. Binning process : For each pos in position tensor, find its bin [start,end) mapped to [0,1,...,bins-1] [start,end) -> [0,1) -> [0, bins) -> floor -> [0,...,bins-1] NOTE: Open ended interval on right, so start <= pos < end, not <= end """ def __init__(self, n_time, embed_dim, range, out_width, clamp=False): super().__init__() self.n_time = n_time self.embed_dim = embed_dim self.emb = nn.Embedding(embed_dim, out_width) self.pos_min, self.pos_max = range self.clamp = clamp def forward(self, pos_start, pos_end=None): # Check if [pos_start,pos_end] in [pos_min, pos_max) if not len(pos_start.shape) == 2: raise TypeError(f"Expected shape with 2 dims, got {pos_start.shape}") if not (self.pos_min <= pos_start).all() and (pos_start < self.pos_max).all(): raise TypeError(f"Range is [{self.pos_min},{self.pos_max}), got {pos_start}") pos_start = pos_start.float() if pos_end is not None: if self.clamp: pos_end = pos_end.clamp(self.pos_min, self.pos_max) pos_end = pos_end.float() # Interpolate so that [pos_start, ..., pos_end] <-> position tensor of length n_ctx n_time = self.n_time if n_time != 1: interpolation = ( torch.arange(0, n_time, dtype=torch.float, device=pos_start.device).view(1, n_time) / n_time ) position = pos_start + (pos_end - pos_start) * interpolation else: position = pos_start # Bin each value to bins_ # [0,1) -> [0,1..,embed_dim) -> [0,1...,embed_dim-1 normalised_position = (position - self.pos_min) / (self.pos_max - self.pos_min) bins_ = (self.embed_dim * normalised_position).floor().long().detach() return self.emb(bins_) class JukeboxLabelConditioner(nn.Module): def __init__(self, config, include_time_signal): super().__init__() embed_dim = config.hidden_size timing_dims = config.timing_dims sampling_rate = config.sampling_rate nb_genres, nb_artists = config.metadata_dims music_tokens_shape = config.n_ctx self.max_nb_genres = config.max_nb_genres self.bow_genre_emb = nn.Embedding(nb_genres, embed_dim) self.artist_emb = nn.Embedding(nb_artists, embed_dim) self.include_time_signal = include_time_signal if self.include_time_signal: total_length_range = (config.min_duration * 
sampling_rate, config.max_duration * sampling_rate) absolute_pos_range = (0.0, config.max_duration * sampling_rate) relative_pos_range = (0.0, 1.0) self.total_length_emb = JukeboxRangeEmbedding(1, timing_dims, total_length_range, embed_dim) self.absolute_pos_emb = JukeboxRangeEmbedding( music_tokens_shape, timing_dims, absolute_pos_range, embed_dim ) self.relative_pos_emb = JukeboxRangeEmbedding( music_tokens_shape, timing_dims, relative_pos_range, embed_dim, clamp=True ) def forward(self, metadata): total_length = metadata[:, 0:1] offset = metadata[:, 1:2] length = metadata[:, 2:3] artist = metadata[:, 3:4] genre = metadata[:, 4:] # Start embedding of length 1 artist_emb = self.artist_emb(artist) # Empty genre slots are denoted by -1. We mask these out. mask = (genre >= 0).float().unsqueeze(2) genre_emb = (self.bow_genre_emb(genre.clamp(0)) * mask).sum(dim=1, keepdim=True) start_emb = genre_emb + artist_emb # Pos embedding of length n_ctx if self.include_time_signal: start, end = offset, offset + length total_length = total_length.float() start = start.float() end = end.float() pos_emb = ( self.total_length_emb(total_length) + self.absolute_pos_emb(start, end) + self.relative_pos_emb(start / total_length, end / total_length) ) else: pos_emb = None return start_emb, pos_emb class JukeboxPrior(PreTrainedModel): """ The JukeboxPrior class, which is a wrapper around the various conditioning and the transformer. JukeboxPrior can be seen as language models trained on music. They model the next `music token` prediction task. If a (lyric) `encoderù is defined, it also models the `next character` prediction on the lyrics. Can be conditionned on timing, artist, genre, lyrics and codes from lower-levels Priors. Args: config (`JukeboxPriorConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. level (`int`, *optional*): Current level of the Prior. Should be in range `[0,nb_priors]`. nb_priors (`int`, *optional*, defaults to 3): Total number of priors. vqvae_encoder (`Callable`, *optional*): Encoding method of the VQVAE encoder used in the forward pass of the model. Passing functions instead of the vqvae module to avoid getting the parameters. vqvae_decoder (`Callable`, *optional*): Decoding method of the VQVAE decoder used in the forward pass of the model. Passing functions instead of the vqvae module to avoid getting the parameters. 
""" config_class = JukeboxPriorConfig def _init_weights(self, module): init_scale = self.config.init_scale if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxConv1D): if self.config.zero_out: module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxPositionalEmbedding): module.pos_emb.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxRangeEmbedding): module.emb.weight.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "lm_head"): module.lm_head.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "start_token"): module.start_token.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: module.conv1d_2.weigth.data.zero_() module.conv1d_2.bias.data.zero_() if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def __init__(self, config: JukeboxPriorConfig, level=None, nb_priors=3, vqvae_encoder=None, vqvae_decoder=None): super().__init__(config) # Passing functions instead of the vqvae module to avoid getting params, only used in the # forward loop self.vqvae_encoder = vqvae_encoder self.vqvae_decoder = vqvae_decoder self.levels = nb_priors self.level = level if level is not None else config.level self.base_model_prefix = f"priors.{self.level}" self.n_ctx = config.n_ctx self.lyric_conditioning = config.nb_relevant_lyric_tokens > 0 self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens self.encoder_loss_fraction = config.encoder_loss_fraction # Audio conditioning : conditioning on music tokens (either from audio or from previous levels or both) self.audio_conditioning = self.level != 0 self.cond_level = self.level - 1 if self.audio_conditioning: self.conditioner_blocks = JukeboxMusicTokenConditioner(config, self.level) # metadata conditioning : contioning on timing, genres, and artist self.metadata_conditioning = config.metadata_conditioning if self.metadata_conditioning: self.metadata_embedding = JukeboxLabelConditioner(config, include_time_signal=not self.audio_conditioning) # define encoder-decoder or encoder and decoder self.is_encoder_decoder = config.is_encoder_decoder if config.is_encoder_decoder: # encoder-decoder transformer self.input_shapes = [config.nb_relevant_lyric_tokens, config.n_ctx] self.embed_dim_shift = [0, config.lyric_vocab_size] self.width = config.hidden_size self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens self.prior = JukeboxConditionalAutoregressive( config, n_ctx=config.nb_relevant_lyric_tokens + config.n_ctx, embed_dim=config.lyric_vocab_size + config.music_vocab_size, audio_conditioning=(self.audio_conditioning or self.metadata_conditioning), metadata_conditioning=True, ) else: # Separate encoder-decoder transformer encoder_config = config.encoder_config if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: self.lyric_acts_width = encoder_config.hidden_size self.encoder_width = config.hidden_size self.encoder_dim = config.lyric_vocab_size self.encoder = JukeboxConditionalAutoregressive( encoder_config, n_ctx=self.nb_relevant_lyric_tokens, embed_dim=self.encoder_dim, audio_conditioning=False, metadata_conditioning=False, 
is_encoder=True, ) self.encoder.proj_in = JukeboxConv1D(encoder_config.hidden_size, config.hidden_size) self.encoder.final_layer_norm = JukeboxLayerNorm(config.hidden_size) self.encoder.lm_head = nn.Linear(config.hidden_size, config.lyric_vocab_size, bias=False) else: self.nb_relevant_lyric_tokens = 0 # decoder model on the tokens self.prior = JukeboxConditionalAutoregressive( config, audio_conditioning=(self.audio_conditioning or self.metadata_conditioning), metadata_conditioning=self.metadata_conditioning, ) self.next_token_prediction_loss_dims = config.n_ctx self.total_loss_dims = self.nb_relevant_lyric_tokens + self.next_token_prediction_loss_dims self.downsamples = [stride**down for stride, down in zip(config.res_strides_t, config.res_downs_t)] self.cond_downsample = self.downsamples[self.level] if self.level != 0 else None self.raw_to_tokens = np.prod(self.downsamples[: nb_priors - self.level]) self.sample_length = self.n_ctx * self.raw_to_tokens logger.info( f"Level:{self.level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample" f" length:{self.sample_length}" ) def get_metadata(self, labels, start, total_length, offset, get_indices=False): metadata = labels.clone() metadata[:, 0] = total_length # Set sample_length to match this level metadata[:, 2] = int(self.sample_length) # Set offset metadata[:, 1:2] = int(offset * self.raw_to_tokens) + int(start * self.raw_to_tokens) # here since metadata has the full token_list, we just need to selected the ones that are relevant # Set lyric tokens metadata, indices = self.set_metadata_lyric_tokens(metadata) if get_indices: return metadata, indices else: return metadata def set_metadata_lyric_tokens(self, labels): """ Processes the full labels to only retreive the relevant lyric tokens and keep the metadata conditioning tokens. """ if self.nb_relevant_lyric_tokens > 0: tokens_list = torch.zeros( (labels.shape[0], self.nb_relevant_lyric_tokens), dtype=torch.long, device=labels.device ) indices_list = [] # whats the index of each current character in original array for idx in range(labels.shape[0]): full_tokens = labels.clone()[:, 4 + self.metadata_embedding.max_nb_genres :] total_length, offset, duration = labels[idx, 0], labels[idx, 1], labels[idx, 2] tokens, indices = get_relevant_lyric_tokens( full_tokens, self.nb_relevant_lyric_tokens, total_length, offset, duration ) tokens_list[idx, :] = tokens indices_list.append(indices) return ( torch.cat((labels[:, : 4 + self.metadata_embedding.max_nb_genres], tokens_list), dim=-1), indices_list, ) else: return labels, None def get_music_tokens_conds(self, music_tokens, start, end): """ Extracts current level's conditioning music tokens. """ if self.level != 0: music_tokens_cond = music_tokens[self.level - 1] music_tokens = music_tokens_cond[:, start // self.cond_downsample : end // self.cond_downsample] missing_cond_len = self.n_ctx // self.cond_downsample - music_tokens_cond[-1].shape[-1] if missing_cond_len > 0: init_cond = torch.zeros(1, missing_cond_len).to(music_tokens_cond.device) music_tokens_cond = torch.cat((music_tokens_cond, init_cond), dim=-1).long() music_tokens_conds = [music_tokens_cond] else: music_tokens_conds = None return music_tokens_conds def prior_preprocess(self, tokens, conds): """ Shifts the input tokens to account for the dictionary merge. The embed_dim_shift give by how much the music tokens should be shifted by. It is equal to `lyric_vocab_size`. 
""" batch_size = tokens[0].shape[0] for i in range(len(tokens)): tokens[i] = (tokens[i] + int(self.embed_dim_shift[i])).view(batch_size, -1) for i in range(len(conds)): if conds[i] is None: conds[i] = torch.zeros( (batch_size, self.input_shapes[i], self.width), dtype=tokens[0].dtype, device=tokens[0].device ) return torch.cat(tokens, dim=1), torch.cat(conds, dim=1) def prior_postprocess(self, tokens): """ Shifts back the input tokens if the model uses an encoder decoder architecture. As the embedding layer is shared, `prior_embed_dim_shift` shifts the music token ids by `lyric_vocab_size`. Only returns the music tokens. """ batch_size = tokens.shape[0] dims = (self.input_shapes[0], tokens.shape[1] - self.input_shapes[0]) tokens = list(torch.split(tokens, dims, dim=1)) # Some of the input tokens might be shifted to take into account the voccabulary fusion for i in range(len(tokens)): bins_shift = int(self.embed_dim_shift[i]) tokens[i] = (tokens[i] - bins_shift).view(batch_size, -1) tokens[i] = torch.clamp(tokens[i], min=0) # If not masking loss, model may have generated lyric/midi tokens which are now shifted <0 by bin_shift return tokens[-1] def embed_tokens(self, music_tokens_conds): """ Embeds the upper level music tokens and upsamples them to provide as audio conditioning. """ music_tokens_conds = music_tokens_conds[: self.cond_level + 1] audio_conditioning = None for music_tokens_cond, conditioner_block in reversed(list(zip(music_tokens_conds, [self.conditioner_blocks]))): audio_conditioning = conditioner_block(music_tokens_cond, audio_conditioning) return audio_conditioning def encode(self, hidden_states, start_level=None, end_level=None, bs_chunks=1): """ Encodes the hidden states (raw audio) using the VQVAE's encoder. Returns latent_states. """ if start_level is None: start_level = self.level if end_level is None: end_level = self.levels # Get latents with torch.no_grad(): latent_states = self.vqvae_encoder( hidden_states, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks ) return latent_states def decode(self, music_tokens, start_level=None, end_level=None, bs_chunks=1): """ Usamples the sequence of codebook vectors to a raw audio. """ if start_level is None: start_level = self.level if end_level is None: end_level = self.levels with torch.no_grad(): output = self.vqvae_decoder( music_tokens, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks ) return output def get_cond(self, music_tokens_conds, metadata): """ Converts the input tokens to input_embeddings. Splits the lyrics form the rest of the metadata. Lyric tokens can be None. """ if metadata is not None: n_labels = metadata.shape[1] - self.nb_relevant_lyric_tokens metadata, lyric_tokens = metadata[:, :n_labels], metadata[:, n_labels:] else: metadata, lyric_tokens = None, None metadata_conditioning, metadata_pos = ( self.metadata_embedding(metadata) if self.metadata_conditioning else (None, None) ) audio_conditioning = self.embed_tokens(music_tokens_conds) if self.audio_conditioning else metadata_pos return audio_conditioning, metadata_conditioning, lyric_tokens def sample( self, n_samples, music_tokens=None, music_tokens_conds=None, metadata=None, temp=1.0, top_k=0, top_p=0.0, chunk_size=None, sample_tokens=None, ): """ Ancestral/Prime sampling a window of tokens using the provided conditioning and metadatas. Args: n_samples (`int`): Number of samples to generate. music_tokens (`List[torch.LongTensor]`, *optional*): Previously gemerated tokens at the current level. 
Used as context for the generation. music_tokens_conds (`List[torch.FloatTensor]`, *optional*): Upper-level music tokens generated by the previous prior model. Is `None` if the generation is not conditionned on the upper-level tokens. metadata (`List[torch.LongTensor]`, *optional*): List containing the metatdata tensor with the artist, genre and the lyric tokens. temp (`float`, *optional*, defaults to 1.0): Sampling temperature. top_k (`int`, *optional*, defaults to 0): Top k probabilities used for filtering. top_p (`float`, *optional*, defaults to 0.0): Top p probabilities used for filtering. chunk_size (`int`, *optional*): Size of the chunks used to prepare the cache of the transformer. sample_tokens (`int`, *optional*): Number of tokens to sample. """ no_past_context = music_tokens is None or music_tokens.shape[1] == 0 name = {True: "Ancestral", False: "Primed"}[no_past_context] logger.info(f"{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}") with torch.no_grad(): # Currently audio_conditioning only uses immediately above layer audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata) if self.is_encoder_decoder: if no_past_context: # the prime_sample function will be used with music_tokens set to None lyric_and_music_tokens, audio_conditioning = self.prior_preprocess( [lyric_tokens], [None, audio_conditioning] ) else: lyric_and_music_tokens, audio_conditioning = self.prior_preprocess( [lyric_tokens, music_tokens], [None, audio_conditioning] ) if sample_tokens is not None: sample_tokens += self.nb_relevant_lyric_tokens music_tokens = self.prior.primed_sample( n_samples, lyric_and_music_tokens, audio_conditioning, metadata_conditioning, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens, ) music_tokens = self.prior_postprocess(music_tokens) else: last_encoder_hidden_states = self.get_encoder_states(lyric_tokens, sample=True) if no_past_context: music_tokens = self.prior.sample( n_samples, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, sample_tokens=sample_tokens, ) else: music_tokens = self.prior.primed_sample( n_samples, music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens, ) return music_tokens def get_encoder_states(self, lyric_tokens, sample=False): """ Retreive the last hidden_states of the lyric encoder that will be attended to by the decoder. Forwards through the lyric encoder. """ if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: if sample: self.encoder = self.encoder.to(lyric_tokens.device) lyric_acts = self.encoder(lyric_tokens, None, None, None) lyric_acts = self.encoder.proj_in(lyric_acts) last_encoder_hidden_states = self.encoder.final_layer_norm(lyric_acts) else: last_encoder_hidden_states = None return last_encoder_hidden_states def get_encoder_loss(self, last_encoder_hidden_states, target_lyrics): """ Computes the loss for the lyric encoder: next lyric token prediction. 
""" if self.lyric_conditioning: last_encoder_hidden_states = self.encoder.lm_head(last_encoder_hidden_states) encoder_loss = nn.functional.cross_entropy( last_encoder_hidden_states.view(-1, self.encoder_dim), target_lyrics.view(-1) ) / np.log(2.0) else: encoder_loss = torch.tensor(0.0, device=last_encoder_hidden_states.device) return encoder_loss def forward_tokens( self, music_tokens, music_tokens_conds=[], metadata=None, get_preds=False, get_attn_weights=False ): """ Applies a forward pass using the conditioning tokens. Different from the classic forward as it does not use the vqvae's encoding layers. """ if get_attn_weights: self.prior.transformer.set_record_attn(get_attn_weights) audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata) if self.is_encoder_decoder: # the preprocess returns the full tokens (Lyrics and Music tokens), shifted tokens, audio_conditioning = self.prior_preprocess( [lyric_tokens, music_tokens], [None, audio_conditioning] ) (encoder_loss, next_token_prediction_loss), preds = self.prior( tokens, audio_conditioning, metadata_conditioning, get_sep_loss=True, get_preds=get_preds ) else: last_encoder_hidden_states = self.get_encoder_states(lyric_tokens) encoder_loss = self.get_encoder_loss(last_encoder_hidden_states, lyric_tokens) next_token_prediction_loss, preds = self.prior( music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, get_preds=get_preds, ) loss = self.encoder_loss_fraction * encoder_loss * self.nb_relevant_lyric_tokens / self.total_loss_dims loss += next_token_prediction_loss * self.next_token_prediction_loss_dims / self.total_loss_dims metrics = { "bpd": next_token_prediction_loss.clone().detach(), "encoder_loss": encoder_loss.clone().detach(), "next_token_prediction_loss": next_token_prediction_loss.clone().detach(), } if get_preds: metrics["preds"] = preds.clone().detach() if get_attn_weights: saved_attn_weights = self.prior.transformer.saved_attn_weights self.prior.transformer.set_record_attn(False) return saved_attn_weights else: return loss, metrics def forward( self, hidden_states: torch.Tensor, metadata: Optional[List[torch.LongTensor]], decode: Optional[bool] = False, get_preds: Optional[bool] = False, ) -> List[torch.Tensor]: """ Encode the hidden states using the `vqvae` encoder, and then predicts the next token in the `forward_tokens` function. The loss is the sum of the `encoder` loss and the `decoder` loss. Args: hidden_states (`torch.Tensor`): Hidden states which should be raw audio metadata (`List[torch.LongTensor]`, *optional*): List containing the metadata conditioning tensorwith the lyric and the metadata tokens. decode (`bool`, *optional*, defaults to `False`): Whether or not to decode the encoded to tokens. get_preds (`bool`, *optional*, defaults to `False`): Whether or not to return the actual predicitons of the model. """ batch_size = hidden_states.shape[0] music_tokens, *music_tokens_conds = self.encode(hidden_states, bs_chunks=batch_size) loss, metrics = self.forward_tokens( music_tokens=music_tokens, music_tokens_conds=music_tokens_conds, metadata=metadata, get_preds=get_preds, ) if decode: dequantised_states = self.decode([music_tokens, *music_tokens_conds]) else: dequantised_states = None return dequantised_states, loss, metrics class JukeboxPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = JukeboxConfig base_model_prefix = "jukebox" supports_gradient_checkpointing = False def _init_weights(self, module): if isinstance(module, JukeboxPrior) or isinstance(module, JukeboxVQVAE): module.apply(module._init_weights) def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) JUKEBOX_SAMPLING_INPUT_DOCSTRING = r""" labels (`List[torch.LongTensor]` of length `n_sample`, and shape `(self.levels, self.config.max_nb_genre + lyric_sequence_length)` : List of metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to condition the generation. sampling_kwargs (`Dict[Any]`): Various additional sampling arguments that are used by the `_sample` function. A detail list of the arguments can bee seen in the [`_sample`] function documentation. """ @add_start_docstrings( """The bare JUKEBOX Model used for music generation. 4 sampling techniques are supported : `primed_sample`, `upsample`, `continue_sample` and `ancestral_sample`. It does not have a `forward` method as the training is not end to end. If you want to fine-tune the model, it is recommended to use the `JukeboxPrior` class and train each prior individually. """, JUKEBOX_START_DOCSTRING, ) class JukeboxModel(JukeboxPreTrainedModel): _no_split_modules = ["JukeboxBlock"] def __init__(self, config): super().__init__(config) vqvae_config = config.vqvae_config self.vqvae = JukeboxVQVAE(vqvae_config) self.set_shared_params(config) self.priors = nn.ModuleList( [JukeboxPrior(config.prior_configs[level], level) for level in range(config.nb_priors)] ) def set_shared_params(self, model_config): """ Initialises the parameters that are shared. This has to be done here because the list of `JukeboxPriorConfig` is nest, and is thus unreachable in the `from_dict` function """ for config in model_config.prior_configs: config.sampling_rate = model_config.sampling_rate config.timing_dims = model_config.timing_dims config.min_duration = model_config.min_duration config.max_duration = model_config.max_duration config.max_nb_genres = model_config.max_nb_genres config.metadata_conditioning = model_config.metadata_conditioning def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1): return self.vqvae.decode(music_tokens, start_level, end_level, bs_chunks) def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): return self.vqvae.encode(input_audio, start_level, end_level, bs_chunks) def split_batch(self, obj, n_samples, split_size): n_passes = (n_samples + split_size - 1) // split_size if isinstance(obj, torch.Tensor): return torch.split(obj, split_size, dim=0) elif isinstance(obj, list): return list(zip(*[torch.split(item, split_size, dim=0) for item in obj])) elif obj is None: return [None] * n_passes else: raise TypeError("Unknown input type") # Sample a partial window of length<n_ctx with tokens_to_sample new tokens on level=level def sample_partial_window( self, music_tokens, labels, offset, sampling_kwargs, level, tokens_to_sample, max_batch_size ): prior = self.priors[level] sampled_tokens = music_tokens[level] n_ctx = prior.n_ctx nb_sampled_tokens = sampled_tokens.shape[1] if nb_sampled_tokens < n_ctx - tokens_to_sample: sampling_kwargs["sample_tokens"] = nb_sampled_tokens + tokens_to_sample start = 0 else: sampling_kwargs["sample_tokens"] = n_ctx start = nb_sampled_tokens - n_ctx + tokens_to_sample return self.sample_single_window(music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size) # Sample a single window of length=n_ctx at 
position=start on level=level def sample_single_window(self, music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size): prior = self.priors[level] n_samples = music_tokens[0].shape[0] n_ctx = prior.n_ctx end = start + n_ctx # get music_tokens already sampled at current level previous_sampled_tokens = music_tokens[level][:, start:end] sample_tokens = sampling_kwargs.get("sample_tokens", None) if "sample_tokens" in sampling_kwargs: sample_tokens = end - start conditioning_tokens = previous_sampled_tokens.shape[1] new_tokens = sample_tokens - previous_sampled_tokens.shape[1] logger.info( f"Sampling {sample_tokens} tokens for [{start},{start+sample_tokens}]. Conditioning on" f" {conditioning_tokens} tokens" ) if new_tokens <= 0: # Nothing new to sample return music_tokens # get music_tokens_conds from level above music_tokens_conds = prior.get_music_tokens_conds(music_tokens, start, end) # if there are no levels above should return None! # set metadata offset, sample_length and lyrics tokens metadata = prior.get_metadata(labels, start, self.total_length, offset) music_tokens_list = self.split_batch(previous_sampled_tokens, n_samples, max_batch_size) music_tokens_conds_list = self.split_batch(music_tokens_conds, n_samples, max_batch_size) metadata_list = self.split_batch(metadata, n_samples, max_batch_size) tokens = [] iterator = tqdm(zip(music_tokens_list, music_tokens_conds_list, metadata_list), leave=False) for music_tokens_i, music_tokens_conds_i, metadata_i in iterator: name = ["Ancestral", "Primed"][music_tokens_i.shape[1] == 0] iterator.set_description( f"[prior level {level}] {name} Sampling {sample_tokens} tokens out of" f" {self.total_length//prior.raw_to_tokens}", refresh=True, ) tokens_i = prior.sample( n_samples=music_tokens_i.shape[0], music_tokens=music_tokens_i, music_tokens_conds=music_tokens_conds_i, metadata=metadata_i, **sampling_kwargs, ) tokens.append(tokens_i) sampled_tokens = torch.cat(tokens, dim=0) # Update music_tokens with new sample music_tokens_new = sampled_tokens[:, -new_tokens:] music_tokens[level] = torch.cat([music_tokens[level], music_tokens_new], dim=1) return music_tokens # Sample total_length tokens at level=level with hop_length=hop_length def sample_level( self, music_tokens, labels, offset, sampling_kwargs, level, total_length, hop_length, max_batch_size ): if total_length >= self.priors[level].n_ctx: iterator = get_starts(total_length, self.priors[level].n_ctx, hop_length) for start in iterator: music_tokens = self.sample_single_window( music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size ) else: music_tokens = self.sample_partial_window( music_tokens, labels, offset, sampling_kwargs, level, total_length, max_batch_size ) return music_tokens @torch.no_grad() def _sample( self, music_tokens, labels, sample_levels, metas=None, chunk_size=32, sampling_temperature=0.98, lower_batch_size=16, max_batch_size=16, sample_length_in_seconds=24, compute_alignments=False, sample_tokens=None, offset=0, save_results=True, sample_length=None, ) -> List[torch.LongTensor]: """ Core sampling function used to generate music tokens. Iterates over the provided list of levels, while saving the generated raw audio at each step. Args: music_tokens (`List[torch.LongTensor]`): A sequence of music tokens of length `self.levels` which will be used as context to continue the sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain level. 
labels (`List[torch.LongTensor]`): List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre + lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to condition the generation. sample_levels (`List[int]`): List of the desired levels at which the sampling will be done. A level is equivalent to the index of the prior in the list of priors metas (`List[Any]`, *optional*): Metadatas used to generate the `labels` chunk_size (`int`, *optional*, defaults to 32): Size of a chunk of audio, used to fill up the memory in chuncks to prevent OOM erros. Bigger chunks means faster memory filling but more consumption. sampling_temperature (`float`, *optional*, defaults to 0.98): Temperature used to ajust the randomness of the sampling. lower_batch_size (`int`, *optional*, defaults to 16): Maximum batch size for the lower level priors max_batch_size (`int`, *optional*, defaults to 16): Maximum batch size for the top level priors sample_length_in_seconds (`int`, *optional*, defaults to 24): Desired length of the generation in seconds compute_alignments (`bool`, *optional*, defaults to `False`): Whether or not to compute the alignment between the lyrics and the audio using the top_prior sample_tokens (`int`, *optional*): Precise number of tokens that should be sampled at each level. This is mostly useful for running dummy experiments offset (`int`, *optional*, defaults to 0): Audio offset used as conditioning, corresponds to the starting sample in the music. If the offset is greater than 0, the lyrics will be shifted take that intoaccount save_results (`bool`, *optional*, defaults to `True`): Whether or not to save the intermediate results. If `True`, will generate a folder named with the start time. sample_length (`int`, *optional*): Desired length of the generation in samples. 
Returns: torch.Tensor Example: ```python >>> from transformers import AutoTokenizer, JukeboxModel, set_seed >>> import torch >>> metas = dict(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land") >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics") >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval() >>> labels = tokenizer(**metas)["input_ids"] >>> set_seed(0) >>> zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)] >>> zs = model._sample(zs, labels, [0], sample_length=40 * model.priors[0].raw_to_tokens, save_results=False) >>> zs[0] tensor([[1853, 1369, 1150, 1869, 1379, 1789, 519, 710, 1306, 1100, 1229, 519, 353, 1306, 1379, 1053, 519, 653, 1631, 1467, 1229, 1229, 10, 1647, 1254, 1229, 1306, 1528, 1789, 216, 1631, 1434, 653, 475, 1150, 1528, 1804, 541, 1804, 1434]]) ``` """ top_prior = self.priors[0] if sample_length is not None: total_length = sample_length else: total_length = ( int(sample_length_in_seconds * self.config.sampling_rate) // top_prior.raw_to_tokens ) * top_prior.raw_to_tokens if sample_levels is None: sample_levels = range(len(self.priors)) # total length of the signal, might be bit different from the actual generated length self.total_length = total_length for level in sample_levels: sampling_kwargs = { "temp": 0.99 if level == len(self.priors) - 1 else sampling_temperature, "chunk_size": chunk_size, "sample_tokens": sample_tokens, } # Set correct total_length, hop_length, labels and sampling_kwargs for level total_token_to_sample = total_length // self.priors[level].raw_to_tokens hop_length = int(self.config.hop_fraction[level] * self.priors[level].n_ctx) max_batch_size = lower_batch_size if level != sample_levels else max_batch_size music_tokens = self.sample_level( music_tokens, labels[level], offset, sampling_kwargs, level, total_token_to_sample, hop_length, max_batch_size, ) if save_results: self.vqvae.to(music_tokens[level].device) # Decode sample with torch.no_grad(): start_level = len(self.priors) - level - 1 # vqvae levels are reversed raw_audio = self.vqvae.decode( music_tokens[: level + 1], start_level=start_level, bs_chunks=music_tokens[level].shape[0] ) logdir = f"jukebox/level_{level}" if not os.path.exists(logdir): os.makedirs(logdir) save_temp_audio(logdir, level, metas=metas, aud=raw_audio.float()) if compute_alignments and self.priors[0] is not None and self.priors[0].nb_relevant_lyric_tokens > 0: with torch.no_grad(): alignments = get_alignment(music_tokens, labels[0], self.priors[0], self.config) torch.save({"alignments": alignments}, f"{logdir}/lyric_alignments.pt") return music_tokens @add_start_docstrings( """ Generates music tokens based on the provided `labels. Will start at the desired prior level and automatically upsample the sequence. If you want to create the audio, you should call `model.decode(tokens)`, which will use the VQ-VAE decoder to convert the music tokens to raw audio. Args: labels (`List[torch.LongTensor]`) : List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre + lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to condition the generation. n_samples (`int`, *optional*, default to 1) : Number of samples to be generated in parallel. 
""", ) def ancestral_sample(self, labels, n_samples=1, **sampling_kwargs) -> List[torch.LongTensor]: """ Example: ```python >>> from transformers import AutoTokenizer, JukeboxModel, set_seed >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval() >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics") >>> lyrics = "Hey, are you awake? Can you talk to me?" >>> artist = "Zac Brown Band" >>> genre = "Country" >>> metas = tokenizer(artist=artist, genres=genre, lyrics=lyrics) >>> set_seed(0) >>> music_tokens = model.ancestral_sample(metas.input_ids, sample_length=400) >>> with torch.no_grad(): ... model.decode(music_tokens)[:, :10].squeeze(-1) tensor([[-0.0219, -0.0679, -0.1050, -0.1203, -0.1271, -0.0936, -0.0396, -0.0405, -0.0818, -0.0697]]) ``` """ sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) music_tokens = [ torch.zeros(n_samples, 0, dtype=torch.long, device=labels[0].device) for _ in range(len(self.priors)) ] music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings( """Generates a continuation of the previously generated tokens. Args: music_tokens (`List[torch.LongTensor]` of length `self.levels` ) : A sequence of music tokens which will be used as context to continue the sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain level. """, JUKEBOX_SAMPLING_INPUT_DOCSTRING, ) def continue_sample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings( """Upsamples a sequence of music tokens using the prior at level `level`. Args: music_tokens (`List[torch.LongTensor]` of length `self.levels` ) : A sequence of music tokens which will be used as context to continue the sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain level. """, JUKEBOX_SAMPLING_INPUT_DOCSTRING, ) def upsample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors) - 1))) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings( """Generate a raw audio conditioned on the provided `raw_audio` which is used as conditioning at each of the generation levels. The audio is encoded to music tokens using the 3 levels of the VQ-VAE. These tokens are used: as conditioning for each level, which means that no ancestral sampling is required. Args: raw_audio (`List[torch.Tensor]` of length `n_samples` ) : A list of raw audio that will be used as conditioning information for each samples that will be generated. """, JUKEBOX_SAMPLING_INPUT_DOCSTRING, ) def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) self.vqvae.to(raw_audio.device).float() with torch.no_grad(): music_tokens = self.vqvae.encode( raw_audio, start_level=0, end_level=len(self.priors), bs_chunks=raw_audio.shape[0] ) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens
transformers/src/transformers/models/deprecated/jukebox/modeling_jukebox.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/jukebox/modeling_jukebox.py", "repo_id": "transformers", "token_count": 54573 }
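# Hedged usage sketch for the Jukebox sampling entry points defined in modeling_jukebox.py
# above (`ancestral_sample`, `continue_sample`, `primed_sample`); this block is not part of
# the original sources, and the checkpoint, metadata and `sample_length` values are
# assumptions copied from the doctests in that file.
#
#     import torch
#     from transformers import AutoTokenizer, JukeboxModel, set_seed
#
#     model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval()
#     tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     labels = tokenizer(
#         artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land"
#     )["input_ids"]
#
#     set_seed(0)
#     # Sample music tokens from scratch, then decode them to raw audio with the VQ-VAE.
#     music_tokens = model.ancestral_sample(labels, sample_length=400)
#     with torch.no_grad():
#         raw_audio = model.decode(music_tokens)
#
#     # Reuse the generated tokens as context and keep sampling towards a longer target length.
#     music_tokens = model.continue_sample(music_tokens, labels, sample_length=800)
#
#     # Or condition the generation on an existing waveform instead of starting from scratch.
#     # The waveform shape below is a hypothetical placeholder, not taken from the original file.
#     # raw_audio = torch.randn(1, 24000, 1)
#     # music_tokens = model.primed_sample(raw_audio, labels, sample_length=400)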
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = { "configuration_transfo_xl": ["TransfoXLConfig"], "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_transfo_xl"] = [ "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", "TransfoXLModel", "TransfoXLPreTrainedModel", "load_tf_weights_in_transfo_xl", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_transfo_xl"] = [ "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", "TFTransfoXLMainLayer", "TFTransfoXLModel", "TFTransfoXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_transfo_xl import TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
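# Hedged usage note (not part of the original file): thanks to the `_LazyModule`
# indirection above, importing the package itself stays cheap, and the torch / TF
# modelling files are only loaded when one of their exported names is first accessed:
#
#     from transformers.models.deprecated.transfo_xl import TransfoXLConfig  # no torch needed
#     from transformers.models.deprecated.transfo_xl import TransfoXLModel   # loads the torch-backed module
#
# If a backend is unavailable, the corresponding entries are simply never added to
# `_import_structure`, so only the names for installed backends are exposed.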
transformers/src/transformers/models/deprecated/transfo_xl/__init__.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/transfo_xl/__init__.py", "repo_id": "transformers", "token_count": 1165 }
# coding=utf-8 # Copyright 2022 BNRist (Tsinghua University), TKLNDST (Nankai University) and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VAN checkpoints from the original repository. URL: https://github.com/Visual-Attention-Network/VAN-Classification""" import argparse import json import sys from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import torch import torch.nn as nn from huggingface_hub import cached_download, hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, VanConfig, VanForImageClassification from transformers.models.deprecated.van.modeling_van import VanLayerScaling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) @dataclass class Tracker: module: nn.Module traced: List[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) if has_not_submodules: if not isinstance(m, VanLayerScaling): self.traced.append(m) def __call__(self, x: Tensor): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced)) @dataclass class ModuleTransfer: src: nn.Module dest: nn.Module verbose: int = 0 src_skip: List = field(default_factory=list) dest_skip: List = field(default_factory=list) def __call__(self, x: Tensor): """ Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the hood we tracked all the operations in both modules. """ dest_traced = Tracker(self.dest)(x).parametrized src_traced = Tracker(self.src)(x).parametrized src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced)) dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced)) if len(dest_traced) != len(src_traced): raise Exception( f"Numbers of operations are different. Source module has {len(src_traced)} operations while" f" destination module has {len(dest_traced)}." 
) for dest_m, src_m in zip(dest_traced, src_traced): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f"Transfered from={src_m} to={dest_m}") def copy_parameters(from_model: nn.Module, our_model: nn.Module) -> nn.Module: # nn.Parameter cannot be tracked by the Tracker, thus we need to manually convert them from_state_dict = from_model.state_dict() our_state_dict = our_model.state_dict() config = our_model.config all_keys = [] for stage_idx in range(len(config.hidden_sizes)): for block_id in range(config.depths[stage_idx]): from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_1" to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.attention_scaling.weight" all_keys.append((from_key, to_key)) from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_2" to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.mlp_scaling.weight" all_keys.append((from_key, to_key)) for from_key, to_key in all_keys: our_state_dict[to_key] = from_state_dict.pop(from_key) our_model.load_state_dict(our_state_dict) return our_model def convert_weight_and_push( name: str, config: VanConfig, checkpoint: str, from_model: nn.Module, save_directory: Path, push_to_hub: bool = True, ): print(f"Downloading weights for {name}...") checkpoint_path = cached_download(checkpoint) print(f"Converting {name}...") from_state_dict = torch.load(checkpoint_path)["state_dict"] from_model.load_state_dict(from_state_dict) from_model.eval() with torch.no_grad(): our_model = VanForImageClassification(config).eval() module_transfer = ModuleTransfer(src=from_model, dest=our_model) x = torch.randn((1, 3, 224, 224)) module_transfer(x) our_model = copy_parameters(from_model, our_model) if not torch.allclose(from_model(x), our_model(x).logits): raise ValueError("The model logits don't match the original one.") checkpoint_name = name print(checkpoint_name) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, ) # we can use the convnext one image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k") image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, ) print(f"Pushed {checkpoint_name}") def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 repo_id = "huggingface/label-files" num_labels = num_labels id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(VanConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_config = { "van-tiny": ImageNetPreTrainedConfig( hidden_sizes=[32, 64, 160, 256], depths=[3, 3, 5, 2], mlp_ratios=[8, 8, 4, 4], ), "van-small": ImageNetPreTrainedConfig( hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 4, 2], mlp_ratios=[8, 8, 4, 4], ), "van-base": ImageNetPreTrainedConfig( hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], ), "van-large": ImageNetPreTrainedConfig( hidden_sizes=[64, 128, 320, 512], depths=[3, 5, 27, 3], mlp_ratios=[8, 8, 4, 4], ), } names_to_original_models = { "van-tiny": van_tiny, "van-small": van_small, "van-base": van_base, "van-large": van_large, } names_to_original_checkpoints = { "van-tiny": ( 
"https://huggingface.co/Visual-Attention-Network/VAN-Tiny-original/resolve/main/van_tiny_754.pth.tar" ), "van-small": ( "https://huggingface.co/Visual-Attention-Network/VAN-Small-original/resolve/main/van_small_811.pth.tar" ), "van-base": ( "https://huggingface.co/Visual-Attention-Network/VAN-Base-original/resolve/main/van_base_828.pth.tar" ), "van-large": ( "https://huggingface.co/Visual-Attention-Network/VAN-Large-original/resolve/main/van_large_839.pth.tar" ), } if model_name: convert_weight_and_push( model_name, names_to_config[model_name], checkpoint=names_to_original_checkpoints[model_name], from_model=names_to_original_models[model_name](), save_directory=save_directory, push_to_hub=push_to_hub, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( model_name, config, checkpoint=names_to_original_checkpoints[model_name], from_model=names_to_original_models[model_name](), save_directory=save_directory, push_to_hub=push_to_hub, ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model-name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: van-tiny/small/base/large. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--van_dir", required=True, type=Path, help=( "A path to VAN's original implementation directory. You can download from here:" " https://github.com/Visual-Attention-Network/VAN-Classification" ), ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) van_dir = args.van_dir # append the path to the parents to maskformer dir sys.path.append(str(van_dir.parent)) from van.models.van import van_base, van_large, van_small, van_tiny convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
transformers/src/transformers/models/deprecated/van/convert_van_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/van/convert_van_to_pytorch.py", "repo_id": "transformers", "token_count": 4514 }
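# Hedged illustration (not part of the original script): the `Tracker` / `ModuleTransfer`
# classes in convert_van_to_pytorch.py above transfer weights between two architectures by
# running one forward pass through each model and pairing up the parametrized leaf modules
# in execution order. A self-contained toy version of the same forward-hook trick:
#
#     import torch
#     import torch.nn as nn
#
#     def traced_leaves(module: nn.Module, x: torch.Tensor):
#         traced, handles = [], []
#
#         def hook(m, inputs, outputs):
#             # keep only leaf modules that actually hold parameters
#             if len(list(m.modules())) == 1 and len(m.state_dict()) > 0:
#                 traced.append(m)
#
#         for m in module.modules():
#             handles.append(m.register_forward_hook(hook))
#         module(x)
#         for h in handles:
#             h.remove()
#         return traced
#
#     src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
#     dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
#     x = torch.randn(1, 3, 32, 32)
#     for d, s in zip(traced_leaves(dst, x), traced_leaves(src, x)):
#         d.load_state_dict(s.state_dict())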
# coding=utf-8 # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """EnCodec model configuration""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class EncodecConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate a Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: target_bandwidths (`List[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`): The range of diffent bandwiths the model can encode audio with. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz). audio_channels (`int`, *optional*, defaults to 1): Number of channels in the audio data. Either 1 for mono or 2 for stereo. normalize (`bool`, *optional*, defaults to `False`): Whether the audio shall be normalized when passed. chunk_length_s (`float`, *optional*): If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded. overlap (`float`, *optional*): Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following formulae : `int((1.0 - self.overlap) * self.chunk_length)`. hidden_size (`int`, *optional*, defaults to 128): Intermediate representation dimension. num_filters (`int`, *optional*, defaults to 32): Number of convolution kernels of first `EncodecConv1d` down sampling layer. num_residual_layers (`int`, *optional*, defaults to 1): Number of residual layers. upsampling_ratios (`Sequence[int]` , *optional*, defaults to `[8, 5, 4, 2]`): Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here that must match the decoder order. norm_type (`str`, *optional*, defaults to `"weight_norm"`): Normalization method. Should be in `["weight_norm", "time_group_norm"]` kernel_size (`int`, *optional*, defaults to 7): Kernel size for the initial convolution. last_kernel_size (`int`, *optional*, defaults to 7): Kernel size for the last convolution layer. residual_kernel_size (`int`, *optional*, defaults to 3): Kernel size for the residual layers. dilation_growth_rate (`int`, *optional*, defaults to 2): How much to increase the dilation with each layer. use_causal_conv (`bool`, *optional*, defaults to `True`): Whether to use fully causal convolution. 
pad_mode (`str`, *optional*, defaults to `"reflect"`): Padding mode for the convolutions. compress (`int`, *optional*, defaults to 2): Reduced dimensionality in residual branches (from Demucs v3). num_lstm_layers (`int`, *optional*, defaults to 2): Number of LSTM layers at the end of the encoder. trim_right_ratio (`float`, *optional*, defaults to 1.0): Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If equal to 1.0, it means that all the trimming is done at the right. codebook_size (`int`, *optional*, defaults to 1024): Number of discret codes that make up VQVAE. codebook_dim (`int`, *optional*): Dimension of the codebook vectors. If not defined, uses `hidden_size`. use_conv_shortcut (`bool`, *optional*, defaults to `True`): Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. If False, an identity function will be used, giving a generic residual connection. Example: ```python >>> from transformers import EncodecModel, EncodecConfig >>> # Initializing a "facebook/encodec_24khz" style configuration >>> configuration = EncodecConfig() >>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration >>> model = EncodecModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "encodec" def __init__( self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs, ): self.target_bandwidths = target_bandwidths self.sampling_rate = sampling_rate self.audio_channels = audio_channels self.normalize = normalize self.chunk_length_s = chunk_length_s self.overlap = overlap self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.norm_type = norm_type self.kernel_size = kernel_size self.last_kernel_size = last_kernel_size self.residual_kernel_size = residual_kernel_size self.dilation_growth_rate = dilation_growth_rate self.use_causal_conv = use_causal_conv self.pad_mode = pad_mode self.compress = compress self.num_lstm_layers = num_lstm_layers self.trim_right_ratio = trim_right_ratio self.codebook_size = codebook_size self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size self.use_conv_shortcut = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) super().__init__(**kwargs) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_length(self) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_stride(self) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length)) @property def frame_rate(self) -> int: 
hop_length = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length) @property def num_quantizers(self) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10)) __all__ = ["EncodecConfig"]
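# Hedged worked example (not part of the original file): the derived properties above
# follow directly from the constructor arguments. With the default 24 kHz settings and
# chunking enabled, the values work out as follows:
#
#     from transformers import EncodecConfig
#
#     config = EncodecConfig(chunk_length_s=1.0, overlap=0.5)
#     config.chunk_length    # int(1.0 * 24_000) = 24_000 samples
#     config.chunk_stride    # max(1, int((1.0 - 0.5) * 24_000)) = 12_000 samples
#     config.frame_rate      # ceil(24_000 / prod([8, 5, 4, 2])) = ceil(24_000 / 320) = 75
#     config.num_quantizers  # int(1000 * 24.0 // (75 * 10)) = 32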
transformers/src/transformers/models/encodec/configuration_encodec.py/0
{ "file_path": "transformers/src/transformers/models/encodec/configuration_encodec.py", "repo_id": "transformers", "token_count": 3319 }
# coding=utf-8 # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import sys from dataclasses import dataclass from functools import partial from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union import numpy as np import torch import torch.nn as nn from torch.nn import LayerNorm from ...integrations.deepspeed import is_deepspeed_available from ...modeling_outputs import ModelOutput from ...utils import ( ContextManagers, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, logging, replace_return_docstrings, ) from .configuration_esm import EsmConfig from .modeling_esm import ESM_START_DOCSTRING, EsmModel, EsmPreTrainedModel from .openfold_utils import ( OFProtein, Rigid, Rotation, atom14_to_atom37, chunk_layer, compute_predicted_aligned_error, compute_tm, frames_and_literature_positions_to_atom14_pos, make_atom14_masks, residue_constants, to_pdb, torsion_angles_to_frames, ) logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/esmfold_v1" _CONFIG_FOR_DOC = "EsmConfig" @dataclass class EsmForProteinFoldingOutput(ModelOutput): """ Output type of [`EsmForProteinFoldingOutput`]. Args: frames (`torch.FloatTensor`): Output frames. sidechain_frames (`torch.FloatTensor`): Output sidechain frames. unnormalized_angles (`torch.FloatTensor`): Predicted unnormalized backbone and side chain torsion angles. angles (`torch.FloatTensor`): Predicted backbone and side chain torsion angles. positions (`torch.FloatTensor`): Predicted positions of the backbone and side chain atoms. states (`torch.FloatTensor`): Hidden states from the protein folding trunk. s_s (`torch.FloatTensor`): Per-residue embeddings derived by concatenating the hidden states of each layer of the ESM-2 LM stem. s_z (`torch.FloatTensor`): Pairwise residue embeddings. distogram_logits (`torch.FloatTensor`): Input logits to the distogram used to compute residue distances. lm_logits (`torch.FloatTensor`): Logits output by the ESM-2 protein language model stem. aatype (`torch.FloatTensor`): Input amino acids (AlphaFold2 indices). atom14_atom_exists (`torch.FloatTensor`): Whether each atom exists in the atom14 representation. residx_atom14_to_atom37 (`torch.FloatTensor`): Mapping between atoms in the atom14 and atom37 representations. residx_atom37_to_atom14 (`torch.FloatTensor`): Mapping between atoms in the atom37 and atom14 representations. atom37_atom_exists (`torch.FloatTensor`): Whether each atom exists in the atom37 representation. residue_index (`torch.FloatTensor`): The index of each residue in the protein chain. Unless internal padding tokens are used, this will just be a sequence of integers from 0 to `sequence_length`. lddt_head (`torch.FloatTensor`): Raw outputs from the lddt head used to compute plddt. plddt (`torch.FloatTensor`): Per-residue confidence scores. Regions of low confidence may indicate areas where the model's prediction is uncertain, or where the protein structure is disordered. 
ptm_logits (`torch.FloatTensor`): Raw logits used for computing ptm. ptm (`torch.FloatTensor`): TM-score output representing the model's high-level confidence in the overall structure. aligned_confidence_probs (`torch.FloatTensor`): Per-residue confidence scores for the aligned structure. predicted_aligned_error (`torch.FloatTensor`): Predicted error between the model's prediction and the ground truth. max_predicted_aligned_error (`torch.FloatTensor`): Per-sample maximum predicted error. """ frames: torch.FloatTensor = None sidechain_frames: torch.FloatTensor = None unnormalized_angles: torch.FloatTensor = None angles: torch.FloatTensor = None positions: torch.FloatTensor = None states: torch.FloatTensor = None s_s: torch.FloatTensor = None s_z: torch.FloatTensor = None distogram_logits: torch.FloatTensor = None lm_logits: torch.FloatTensor = None aatype: torch.FloatTensor = None atom14_atom_exists: torch.FloatTensor = None residx_atom14_to_atom37: torch.FloatTensor = None residx_atom37_to_atom14: torch.FloatTensor = None atom37_atom_exists: torch.FloatTensor = None residue_index: torch.FloatTensor = None lddt_head: torch.FloatTensor = None plddt: torch.FloatTensor = None ptm_logits: torch.FloatTensor = None ptm: torch.FloatTensor = None aligned_confidence_probs: torch.FloatTensor = None predicted_aligned_error: torch.FloatTensor = None max_predicted_aligned_error: torch.FloatTensor = None ESMFOLD_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) masking_pattern (`torch.LongTensor` of shape `({0})`, *optional*): Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`. num_recycles (`int`, *optional*, defaults to `None`): Number of times to recycle the input sequence. If `None`, defaults to `config.num_recycles`. "Recycling" consists of passing the output of the folding trunk back in as input to the trunk. During training, the number of recycles should vary with each batch, to ensure that the model learns to output valid predictions after each recycle. During inference, num_recycles should be set to the highest value that the model was trained with for maximum accuracy. Accordingly, when this value is set to `None`, config.max_recycles is used. """ def is_fp16_enabled(): # Autocast world fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16 fp16_enabled = fp16_enabled and torch.is_autocast_enabled() return fp16_enabled def is_deepspeed_initialized(): if is_deepspeed_available(): return False else: try: import deepspeed # This is not available in all DeepSpeed versions. 
return deepspeed.utils.is_initialized() except Exception: return False def collate_dense_tensors(samples: List[torch.Tensor], pad_v: float = 0) -> torch.Tensor: """ Takes a list of tensors with the following dimensions: [(d_11, ..., d_1K), (d_21, ..., d_2K), ..., (d_N1, ..., d_NK)] and stack + pads them into a single tensor of: (N, max_i=1,N { d_i1 }, ..., max_i=1,N {diK}) """ if len(samples) == 0: return torch.Tensor() if len({x.dim() for x in samples}) != 1: raise RuntimeError(f"Samples has varying dimensions: {[x.dim() for x in samples]}") (device,) = tuple({x.device for x in samples}) # assumes all on same device max_shape = [max(lst) for lst in zip(*[x.shape for x in samples])] result = torch.empty(len(samples), *max_shape, dtype=samples[0].dtype, device=device) result.fill_(pad_v) for i in range(len(samples)): result_i = result[i] t = samples[i] result_i[tuple(slice(0, k) for k in t.shape)] = t return result def flatten_final_dims(t: torch.Tensor, no_dims: int): return t.reshape(t.shape[:-no_dims] + (-1,)) def permute_final_dims(tensor: torch.Tensor, inds: List[int]): zero_index = -1 * len(inds) first_inds = list(range(len(tensor.shape[:zero_index]))) return tensor.permute(first_inds + [zero_index + i for i in inds]) def dict_multimap(fn, dicts): first = dicts[0] new_dict = {} for k, v in first.items(): all_v = [d[k] for d in dicts] if isinstance(v, dict): new_dict[k] = dict_multimap(fn, all_v) else: new_dict[k] = fn(all_v) return new_dict def trunc_normal_init_(weights, scale=1.0, fan="fan_in"): shape = weights.shape scale = scale / max(1, shape[1]) if not is_scipy_available(): logger.warning( "This init requires scipy, but scipy was not found, default to an approximation that might not be" " equivalent." ) std = math.sqrt(scale) torch.nn.init.normal_(weights, std=std).clamp(min=0.0, max=2.0 * std) else: from scipy.stats import truncnorm std = math.sqrt(scale) / truncnorm.std(a=-2, b=2, loc=0, scale=1) samples = truncnorm.rvs(a=-2, b=2, loc=0, scale=std, size=weights.numel()) samples = np.reshape(samples, shape) weights.copy_(torch.tensor(samples, device=weights.device)) def ipa_point_weights_init_(weights): with torch.no_grad(): softplus_inverse_1 = 0.541324854612918 weights.fill_(softplus_inverse_1) class EsmFoldLinear(nn.Linear): """ A Linear layer with built-in nonstandard initializations. Called just like torch.nn.Linear. Implements the initializers in 1.11.4, plus some additional ones found in the code. """ def __init__( self, in_dim: int, out_dim: int, bias: bool = True, init: str = "default", init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None, ): """ Args: in_dim: The final dimension of inputs to the layer out_dim: The final dimension of layer outputs bias: Whether to learn an additive bias. True by default init: The initializer to use. Choose from: "default": LeCun fan-in truncated normal initialization "relu": He initialization w/ truncated normal distribution "glorot": Fan-average Glorot uniform initialization "gating": Weights=0, Bias=1 "normal": Normal initialization with std=1/sqrt(fan_in) "final": Weights=0, Bias=0 Overridden by init_fn if the latter is not None. init_fn: A custom initializer taking weight and bias as inputs. Overrides init if not None. 
""" super().__init__(in_dim, out_dim, bias=bias) if bias: with torch.no_grad(): self.bias.fill_(0) self.init = init self.init_fn = init_fn if init not in ["default", "relu", "glorot", "gating", "normal", "final"]: raise ValueError("Invalid init string.") class EsmFoldLayerNorm(nn.Module): def __init__(self, c_in, eps=1e-5): super().__init__() self.c_in = (c_in,) self.eps = eps self.weight = nn.Parameter(torch.ones(c_in)) self.bias = nn.Parameter(torch.zeros(c_in)) def forward(self, x): d = x.dtype if d is torch.bfloat16 and not is_deepspeed_initialized(): with torch.cuda.amp.autocast(enabled=False): out = nn.functional.layer_norm(x, self.c_in, self.weight.to(dtype=d), self.bias.to(dtype=d), self.eps) else: out = nn.functional.layer_norm(x, self.c_in, self.weight, self.bias, self.eps) return out @torch.jit.ignore def softmax_no_cast(t: torch.Tensor, dim: int = -1) -> torch.Tensor: """ Softmax, but without automatic casting to fp32 when the input is of type bfloat16 """ d = t.dtype if d is torch.bfloat16 and not is_deepspeed_initialized(): with torch.cuda.amp.autocast(enabled=False): s = torch.nn.functional.softmax(t, dim=dim) else: s = torch.nn.functional.softmax(t, dim=dim) return s class EsmFoldAttention(nn.Module): """ Standard multi-head attention using AlphaFold's default layer initialization. Allows multiple bias vectors. """ def __init__( self, c_q: int, c_k: int, c_v: int, c_hidden: int, no_heads: int, gating: bool = True, ): """ Args: c_q: Input dimension of query data c_k: Input dimension of key data c_v: Input dimension of value data c_hidden: Per-head hidden dimension no_heads: Number of attention heads gating: Whether the output should be gated using query data """ super().__init__() self.c_q = c_q self.c_k = c_k self.c_v = c_v self.c_hidden = c_hidden self.no_heads = no_heads self.gating = gating # DISCREPANCY: c_hidden is not the per-head channel dimension, as # stated in the supplement, but the overall channel dimension. 
self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot") self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init="glorot") self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init="glorot") self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init="final") self.linear_g = None if self.gating: self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init="gating") self.sigmoid = nn.Sigmoid() def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # [*, Q/K/V, H * C_hidden] q = self.linear_q(q_x) k = self.linear_k(kv_x) v = self.linear_v(kv_x) # [*, Q/K, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) k = k.view(k.shape[:-1] + (self.no_heads, -1)) v = v.view(v.shape[:-1] + (self.no_heads, -1)) # [*, H, Q/K, C_hidden] q = q.transpose(-2, -3) k = k.transpose(-2, -3) v = v.transpose(-2, -3) q /= math.sqrt(self.c_hidden) return q, k, v def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor: if self.linear_g is not None: g = self.sigmoid(self.linear_g(q_x)) # [*, Q, H, C_hidden] g = g.view(g.shape[:-1] + (self.no_heads, -1)) o = o * g # [*, Q, H * C_hidden] o = flatten_final_dims(o, 2) # [*, Q, C_q] o = self.linear_o(o) return o def forward( self, q_x: torch.Tensor, kv_x: torch.Tensor, biases: Optional[List[torch.Tensor]] = None, use_memory_efficient_kernel: bool = False, use_lma: bool = False, lma_q_chunk_size: int = 1024, lma_kv_chunk_size: int = 4096, use_flash: bool = False, flash_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Args: q_x: [*, Q, C_q] query data kv_x: [*, K, C_k] key data biases: List of biases that broadcast to [*, H, Q, K] use_memory_efficient_kernel: Whether to use a custom memory-efficient attention kernel. This should be the default choice for most. If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead use_lma: Whether to use low-memory attention (Staats & Rabe 2021). If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead lma_q_chunk_size: Query chunk size (for LMA) lma_kv_chunk_size: Key/Value chunk size (for LMA) Returns [*, Q, C_q] attention update """ if use_lma and (lma_q_chunk_size is None or lma_kv_chunk_size is None): raise ValueError("If use_lma is specified, lma_q_chunk_size and lma_kv_chunk_size must be provided") if use_flash and biases is not None: raise ValueError("use_flash is incompatible with the bias option. 
For masking, use flash_mask instead") attn_options = [use_memory_efficient_kernel, use_lma, use_flash] if sum(attn_options) > 1: raise ValueError("Choose at most one alternative attention algorithm") if biases is None: biases = [] # [*, H, Q/K, C_hidden] query, key, value = self._prep_qkv(q_x, kv_x) key = permute_final_dims(key, (1, 0)) # [*, H, Q, K] output = torch.matmul(query, key) for b in biases: output += b output = softmax_no_cast(output, -1) # [*, H, Q, C_hidden] output = torch.matmul(output, value) output = output.transpose(-2, -3) output = self._wrap_up(output, q_x) return output class EsmFoldTriangleAttention(nn.Module): def __init__(self, c_in, c_hidden, no_heads, starting=True, inf=1e9): """ Args: c_in: Input channel dimension c_hidden: Overall hidden channel dimension (not per-head) no_heads: Number of attention heads """ super().__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_heads = no_heads self.starting = starting self.inf = inf self.layer_norm = LayerNorm(self.c_in) self.linear = EsmFoldLinear(c_in, self.no_heads, bias=False, init="normal") self.mha = EsmFoldAttention(self.c_in, self.c_in, self.c_in, self.c_hidden, self.no_heads) @torch.jit.ignore def _chunk( self, x: torch.Tensor, biases: List[torch.Tensor], chunk_size: int, use_memory_efficient_kernel: bool = False, use_lma: bool = False, inplace_safe: bool = False, ) -> torch.Tensor: "triangle! triangle!" mha_inputs = { "q_x": x, "kv_x": x, "biases": biases, } return chunk_layer( partial(self.mha, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma), mha_inputs, chunk_size=chunk_size, no_batch_dims=len(x.shape[:-2]), _out=x if inplace_safe else None, ) def forward( self, x: torch.Tensor, mask: Optional[torch.Tensor] = None, chunk_size: Optional[int] = None, use_memory_efficient_kernel: bool = False, use_lma: bool = False, inplace_safe: bool = False, ) -> torch.Tensor: """ Args: x: [*, I, J, C_in] input tensor (e.g. the pair representation) Returns: [*, I, J, C_in] output tensor """ if mask is None: # [*, I, J] mask = x.new_ones( x.shape[:-1], ) if not self.starting: x = x.transpose(-2, -3) mask = mask.transpose(-1, -2) # [*, I, J, C_in] x = self.layer_norm(x) # [*, I, 1, 1, J] mask_bias = (self.inf * (mask - 1))[..., :, None, None, :] # [*, H, I, J] triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1)) # [*, 1, H, I, J] triangle_bias = triangle_bias.unsqueeze(-4) biases = [mask_bias, triangle_bias] if chunk_size is not None: x = self._chunk( x, biases, chunk_size, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma, inplace_safe=inplace_safe, ) else: x = self.mha( q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma ) if not self.starting: x = x.transpose(-2, -3) return x class EsmFoldTriangleMultiplicativeUpdate(nn.Module): """ Implements Algorithms 11 and 12. 
""" def __init__(self, config, _outgoing=True): super().__init__() c_hidden = config.pairwise_state_dim self._outgoing = _outgoing self.linear_a_p = EsmFoldLinear(c_hidden, c_hidden) self.linear_a_g = EsmFoldLinear(c_hidden, c_hidden, init="gating") self.linear_b_p = EsmFoldLinear(c_hidden, c_hidden) self.linear_b_g = EsmFoldLinear(c_hidden, c_hidden, init="gating") self.linear_g = EsmFoldLinear(c_hidden, c_hidden, init="gating") self.linear_z = EsmFoldLinear(c_hidden, c_hidden, init="final") self.layer_norm_in = LayerNorm(c_hidden) self.layer_norm_out = LayerNorm(c_hidden) self.sigmoid = nn.Sigmoid() def _combine_projections( self, a: torch.Tensor, b: torch.Tensor, _inplace_chunk_size: Optional[int] = None ) -> torch.Tensor: if self._outgoing: a = permute_final_dims(a, (2, 0, 1)) b = permute_final_dims(b, (2, 1, 0)) else: a = permute_final_dims(a, (2, 1, 0)) b = permute_final_dims(b, (2, 0, 1)) if _inplace_chunk_size is not None: # To be replaced by torch vmap for i in range(0, a.shape[-3], _inplace_chunk_size): a_chunk = a[..., i : i + _inplace_chunk_size, :, :] b_chunk = b[..., i : i + _inplace_chunk_size, :, :] a[..., i : i + _inplace_chunk_size, :, :] = torch.matmul( a_chunk, b_chunk, ) p = a else: p = torch.matmul(a, b) return permute_final_dims(p, (1, 2, 0)) def _inference_forward( self, z: torch.Tensor, mask: Optional[torch.Tensor] = None, inplace_chunk_size: Optional[int] = None, with_add: bool = True, ): """ Args: z: A [*, N, N, C_z] pair representation mask: A [*, N, N] pair mask inplace_chunk_size: Size of chunks used in the main computation. Increase to trade memory for speed. with_add: If True, z is overwritten with (z + update). Otherwise, it is overwritten with (update). Returns: A reference to the overwritten z More memory-efficient, inference-only version of the forward function. Uses in-place operations, fusion of the addition that happens after this module in the Evoformer, a smidge of recomputation, and a cache of overwritten values to lower peak memory consumption of this module from 5x the size of the input tensor z to 2.5x its size. Useful for inference on extremely long sequences. It works as follows. We will make reference to variables used in the default forward implementation below. Naively, triangle multiplication attention requires the manifestation of 5 tensors the size of z: 1) z, the "square" input tensor, 2) a, the first projection of z, 3) b, the second projection of b, 4) g, a z-sized mask, and 5) a z-sized tensor for intermediate computations. For large N, this is prohibitively expensive; for N=4000, for example, z is more than 8GB alone. To avoid this problem, we compute b, g, and all intermediate tensors in small chunks, noting that the chunks required to compute a chunk of the output depend only on the tensor a and corresponding vertical and horizontal chunks of z. This suggests an algorithm that loops over pairs of chunks of z: hereafter "columns" and "rows" of z, even though each "column" and "row" in fact contains inplace_chunk_size contiguous true columns and rows of z. Writing output chunks to a new tensor would bring total memory consumption down to 3x the size of z. However, more memory can be saved by writing output chunks directly to z in-place. WLOG, we choose to write output chunks vertically, overwriting the ith "column" of z at the end of the ith iteration of the main loop. Despite this overwriting, the ith column is always one column ahead of previously overwritten columns and can be recovered directly from z. 
After the first iteration, however, the ith row of z is always at least partially overwritten. For this reason, we introduce the z-cache, a tensor one-half the size of z. The z-cache initially contains the left half (2nd and 3rd quadrants) of z. For 0 < i < N/2, the missing left part of the ith row of z is recovered from this cache at the beginning of the ith iteration. Once i exceeds n/2, the cache is "reoriented" to encompass the 3rd and 4th quadrants of z instead. Though the 3rd quadrant of the original z is entirely overwritten at this point, it can be recovered from the z-cache itself. Thereafter, the ith row of z can be recovered in its entirety from the reoriented z-cache. After the final iteration, z has been completely overwritten and contains the triangular multiplicative update. If with_add is True, it instead contains the sum of z and the triangular multiplicative update. In either case, peak memory consumption is just 2.5x the size of z, disregarding memory used for chunks and other small variables. """ if mask is None: mask = z.new_ones(z.shape[:-1]) mask = mask.unsqueeze(-1) def compute_projection_helper(pair, mask, a=True): if a: linear_g = self.linear_a_g linear_p = self.linear_a_p else: linear_g = self.linear_b_g linear_p = self.linear_b_p pair = self.layer_norm_in(pair) p = linear_g(pair) p.sigmoid_() p *= linear_p(pair) p *= mask p = permute_final_dims(p, (2, 0, 1)) return p def compute_projection(pair, mask, a=True, chunked=True): need_transpose = self._outgoing ^ a if not chunked: p = compute_projection_helper(pair, mask, a) if need_transpose: p = p.transpose(-1, -2) else: # This computation is chunked so as not to exceed our 2.5x # budget with a large intermediate tensor linear_g = self.linear_a_g if a else self.linear_b_g c = linear_g.bias.shape[-1] out_shape = pair.shape[:-3] + (c,) + pair.shape[-3:-1] p = pair.new_zeros(out_shape) for i in range(0, pair.shape[-3], inplace_chunk_size): pair_chunk = pair[..., i : i + inplace_chunk_size, :, :] pair_chunk = compute_projection_helper( pair[..., i : i + inplace_chunk_size, :, :], mask[..., i : i + inplace_chunk_size, :, :], a, ) if need_transpose: pair_chunk = pair_chunk.transpose(-1, -2) p[..., i : i + inplace_chunk_size] = pair_chunk else: p[..., i : i + inplace_chunk_size, :] = pair_chunk del pair_chunk return p # We start by fully manifesting a. In addition to the input, this # brings total memory consumption to 2x z (disregarding size of chunks) # [*, N, N, c] a = compute_projection(z, mask, True, chunked=True) if inplace_chunk_size is not None: n = a.shape[-1] half_n = n // 2 + n % 2 row_dim = -3 col_dim = -2 b_chunk_dim = row_dim if self._outgoing else col_dim def empty_slicer(t): return [slice(None) for _ in t.shape] def slice_tensor(t, start, end, dim): # Slices start:end from the dim dimension of t s = empty_slicer(t) s[dim] = slice(start, end) return t[s] def flip_z_cache_(z_cache, z): # "Reorient" the z_cache (see below), filling it with quadrants # 3---recovered from the z_cache---and 4---recovered from z--- # of the input tensor z. 
quadrant_3 = slice_tensor(z_cache, half_n, None, row_dim) z_cache = z_cache.transpose(row_dim, col_dim) # If n is odd, we need to shrink the z_cache by one row z_cache = z_cache[..., : (n // 2), :, :] # Move the 3rd quadrant of z into the first_half_slicer = empty_slicer(z_cache) first_half_slicer[col_dim] = slice(0, half_n) z_cache[first_half_slicer] = quadrant_3 # Get the fourth quadrant of z quadrant_4 = slice_tensor(z, half_n, None, row_dim) quadrant_4 = slice_tensor(quadrant_4, half_n, None, col_dim) # Insert said quadrant into the rotated z-cache quadrant_3_slicer = empty_slicer(z_cache) quadrant_3_slicer[col_dim] = slice(half_n, None) z_cache[quadrant_3_slicer] = quadrant_4 return z_cache # Initialize the z cache to the left half of z. z_cache_shape = list(z.shape) z_cache_shape[col_dim] = half_n z_cache = z.new_zeros(z_cache_shape) z_cache_slicer = empty_slicer(z_cache) z_cache_slicer[col_dim] = slice(0, half_n) z_cache.copy_(z[z_cache_slicer]) z_cache_rotated = False # We need to reorient the z-cache at the halfway point, and we # don't want a single chunk to straddle that point. We contract one # of the chunks in the middle to address that problem. i_range = list(range(0, half_n, inplace_chunk_size)) initial_offsets = [i_2 - i_1 for i_1, i_2 in zip(i_range, i_range[1:] + [half_n])] after_half = list(range(half_n, n, inplace_chunk_size)) after_half_offsets = [inplace_chunk_size for _ in after_half] combined_range_with_offsets = zip(i_range + after_half, initial_offsets + after_half_offsets) for i, offset in combined_range_with_offsets: if not z_cache_rotated and i >= half_n: z_cache = flip_z_cache_(z_cache, z) z_cache_rotated = True z_chunk_b = slice_tensor(z, i, i + offset, b_chunk_dim) mask_chunk = slice_tensor(mask, i, i + offset, b_chunk_dim) z_chunk_b = z_chunk_b.clone() if b_chunk_dim == col_dim: z_chunk_b = slice_tensor(z, i, i + offset, col_dim) else: # b_chunk_dim == row_dim # In this case, the b-dimension (b_chunk_dim) is partially # overwritten at the end of each iteration. We need to # restore the missing component from the z-cache. if not z_cache_rotated: z_chunk_slicer = empty_slicer(z_chunk_b) z_chunk_slicer[col_dim] = slice(0, half_n) z_chunk_b[z_chunk_slicer] = slice_tensor(z_cache, i, i + offset, row_dim) else: z_cache_offset = i - half_n z_chunk_b = slice_tensor(z_cache, z_cache_offset, z_cache_offset + offset, row_dim) b_chunk = compute_projection(z_chunk_b, mask_chunk, a=False, chunked=False) del z_chunk_b x_chunk = torch.matmul(a, b_chunk) x_chunk = permute_final_dims(x_chunk, (1, 2, 0)) x_chunk = self.layer_norm_out(x_chunk) x_chunk = self.linear_z(x_chunk) # The g dimension (col_dim) is parallel to and ahead of the # overwrites in z. We can extract the g chunk normally. 
z_chunk_g = slice_tensor(z, i, i + offset, col_dim) g_chunk = self.linear_g(self.layer_norm_in(z_chunk_g)) g_chunk.sigmoid_() del z_chunk_g x_chunk *= g_chunk # Write the columns into z in-place z_slicer = empty_slicer(z) z_slicer[col_dim] = slice(i, i + offset) if with_add: z[z_slicer] += x_chunk else: z[z_slicer] = x_chunk else: b = compute_projection(z, mask, False, False) x = torch.matmul(a, b) x = self.layer_norm_out(x) x = self.linear_z(x) g = self.linear_g(z) g.sigmoid_() x *= g if with_add: z += x else: z = x return z def forward( self, z: torch.Tensor, mask: Optional[torch.Tensor] = None, inplace_safe: bool = False, _add_with_inplace: bool = False, _inplace_chunk_size: Optional[int] = 256, ) -> torch.Tensor: """ Args: x: [*, N_res, N_res, C_z] input tensor mask: [*, N_res, N_res] input mask Returns: [*, N_res, N_res, C_z] output tensor """ if inplace_safe: x = self._inference_forward( z, mask, inplace_chunk_size=_inplace_chunk_size, with_add=_add_with_inplace, ) return x if mask is None: mask = z.new_ones(z.shape[:-1]) mask = mask.unsqueeze(-1) z = self.layer_norm_in(z) a = mask a = a * self.sigmoid(self.linear_a_g(z)) a = a * self.linear_a_p(z) b = mask b = b * self.sigmoid(self.linear_b_g(z)) b = b * self.linear_b_p(z) if is_fp16_enabled(): with torch.cuda.amp.autocast(enabled=False): x = self._combine_projections(a.float(), b.float()) else: x = self._combine_projections(a, b) del a, b x = self.layer_norm_out(x) x = self.linear_z(x) g = self.sigmoid(self.linear_g(z)) x = x * g return x class EsmFoldPreTrainedModel(EsmPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ # Subclass `EsMPreTrainedModel` to deal with special init def _init_weights(self, module): """Initialize the weights""" if isinstance(module, EsmFoldLinear): with torch.no_grad(): if module.init_fn is not None: module.init_fn(module.weight, module.bias) elif module.init == "default": trunc_normal_init_(module.weight, scale=1.0) elif module.init == "relu": trunc_normal_init_(module.weight, scale=2.0) elif module.init == "glorot": nn.init.xavier_uniform_(module.weight, gain=1) elif module.init == "gating": module.weight.fill_(0.0) if module.bias: module.bias.fill_(1.0) elif module.init == "normal": torch.nn.init.kaiming_normal_(module.weight, nonlinearity="linear") elif module.init == "final": module.weight.fill_(0.0) elif isinstance(module, EsmFoldInvariantPointAttention): ipa_point_weights_init_(module.head_weights) elif isinstance(module, EsmFoldTriangularSelfAttentionBlock): torch.nn.init.zeros_(module.tri_mul_in.linear_z.weight) torch.nn.init.zeros_(module.tri_mul_in.linear_z.bias) torch.nn.init.zeros_(module.tri_mul_out.linear_z.weight) torch.nn.init.zeros_(module.tri_mul_out.linear_z.bias) torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.weight) torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.bias) torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.weight) torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.bias) torch.nn.init.zeros_(module.sequence_to_pair.o_proj.weight) torch.nn.init.zeros_(module.sequence_to_pair.o_proj.bias) torch.nn.init.zeros_(module.pair_to_sequence.linear.weight) torch.nn.init.zeros_(module.seq_attention.o_proj.weight) torch.nn.init.zeros_(module.seq_attention.o_proj.bias) torch.nn.init.zeros_(module.mlp_seq.mlp[-2].weight) torch.nn.init.zeros_(module.mlp_seq.mlp[-2].bias) torch.nn.init.zeros_(module.mlp_pair.mlp[-2].weight) torch.nn.init.zeros_(module.mlp_pair.mlp[-2].bias) 
else: super()._init_weights(module) class EsmFoldSelfAttention(nn.Module): def __init__(self, embed_dim, num_heads, head_width, gated=False): super().__init__() assert embed_dim == num_heads * head_width self.embed_dim = embed_dim self.num_heads = num_heads self.head_width = head_width self.proj = nn.Linear(embed_dim, embed_dim * 3, bias=False) self.o_proj = nn.Linear(embed_dim, embed_dim, bias=True) self.gated = gated if gated: self.g_proj = nn.Linear(embed_dim, embed_dim) torch.nn.init.zeros_(self.g_proj.weight) torch.nn.init.ones_(self.g_proj.bias) self.rescale_factor = self.head_width**-0.5 torch.nn.init.zeros_(self.o_proj.bias) def forward(self, x, mask=None, bias=None, indices=None): """ Basic self attention with optional mask and external pairwise bias. To handle sequences of different lengths, use mask. Inputs: x: batch of input sequneces (.. x L x C) mask: batch of boolean masks where 1=valid, 0=padding position (.. x L_k) bias: batch of scalar pairwise attention biases (.. x Lq x Lk x num_heads) Outputs: sequence projection (B x L x embed_dim), attention maps (B x L x L x num_heads) """ t = self.proj(x).view(*x.shape[:2], self.num_heads, -1) t = t.permute(0, 2, 1, 3) q, k, v = t.chunk(3, dim=-1) q = self.rescale_factor * q a = torch.einsum("...qc,...kc->...qk", q, k) # Add external attention bias. if bias is not None: a = a + bias.permute(0, 3, 1, 2) # Do not attend to padding tokens. if mask is not None: mask = mask[:, None, None] a = a.masked_fill(mask == False, -np.inf) # noqa: E712 a = nn.functional.softmax(a, dim=-1) y = torch.einsum("...hqk,...hkc->...qhc", a, v) y = y.reshape(*y.shape[:2], -1) if self.gated: y = self.g_proj(x).sigmoid() * y y = self.o_proj(y) return y, a.permute(0, 3, 1, 2) class EsmFoldDropout(nn.Module): """ Implementation of dropout with the ability to share the dropout mask along a particular dimension. 
""" def __init__(self, r: float, batch_dim: Union[int, List[int]]): super().__init__() self.r = r if isinstance(batch_dim, int): batch_dim = [batch_dim] self.batch_dim = batch_dim self.dropout = nn.Dropout(self.r) def forward(self, x: torch.Tensor) -> torch.Tensor: shape = list(x.shape) if self.batch_dim is not None: for bd in self.batch_dim: shape[bd] = 1 return x * self.dropout(x.new_ones(shape)) class EsmFoldSequenceToPair(nn.Module): def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim): super().__init__() self.layernorm = nn.LayerNorm(sequence_state_dim) self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True) self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True) torch.nn.init.zeros_(self.proj.bias) torch.nn.init.zeros_(self.o_proj.bias) def forward(self, sequence_state): """ Inputs: sequence_state: B x L x sequence_state_dim Output: pairwise_state: B x L x L x pairwise_state_dim Intermediate state: B x L x L x 2*inner_dim """ assert len(sequence_state.shape) == 3 s = self.layernorm(sequence_state) s = self.proj(s) q, k = s.chunk(2, dim=-1) prod = q[:, None, :, :] * k[:, :, None, :] diff = q[:, None, :, :] - k[:, :, None, :] x = torch.cat([prod, diff], dim=-1) x = self.o_proj(x) return x class EsmFoldPairToSequence(nn.Module): def __init__(self, pairwise_state_dim, num_heads): super().__init__() self.layernorm = nn.LayerNorm(pairwise_state_dim) self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False) def forward(self, pairwise_state): """ Inputs: pairwise_state: B x L x L x pairwise_state_dim Output: pairwise_bias: B x L x L x num_heads """ assert len(pairwise_state.shape) == 4 z = self.layernorm(pairwise_state) pairwise_bias = self.linear(z) return pairwise_bias class EsmFoldResidueMLP(nn.Module): def __init__(self, embed_dim, inner_dim, dropout=0): super().__init__() self.mlp = nn.Sequential( nn.LayerNorm(embed_dim), nn.Linear(embed_dim, inner_dim), nn.ReLU(), nn.Linear(inner_dim, embed_dim), nn.Dropout(dropout), ) def forward(self, x): return x + self.mlp(x) class EsmFoldTriangularSelfAttentionBlock(nn.Module): def __init__(self, config): super().__init__() self.config = config sequence_state_dim = config.sequence_state_dim pairwise_state_dim = config.pairwise_state_dim sequence_num_heads = sequence_state_dim // config.sequence_head_width pairwise_num_heads = pairwise_state_dim // config.pairwise_head_width self.layernorm_1 = nn.LayerNorm(sequence_state_dim) self.sequence_to_pair = EsmFoldSequenceToPair(sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim) self.pair_to_sequence = EsmFoldPairToSequence(pairwise_state_dim, sequence_num_heads) self.seq_attention = EsmFoldSelfAttention( sequence_state_dim, sequence_num_heads, config.sequence_head_width, gated=True ) self.tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=True) self.tri_mul_in = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=False) self.tri_att_start = EsmFoldTriangleAttention( pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=True ) self.tri_att_end = EsmFoldTriangleAttention( pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=False ) self.mlp_seq = EsmFoldResidueMLP(sequence_state_dim, 4 * sequence_state_dim, dropout=config.dropout) self.mlp_pair = EsmFoldResidueMLP(pairwise_state_dim, 4 * pairwise_state_dim, dropout=config.dropout) self.drop = nn.Dropout(config.dropout) self.row_drop = EsmFoldDropout(config.dropout * 2, 2) self.col_drop = 
EsmFoldDropout(config.dropout * 2, 1) def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs): """ Inputs: sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim mask: B x L boolean tensor of valid positions Output: sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim """ if len(sequence_state.shape) != 3: raise ValueError(f"`sequence_state` should be a 3d-tensor, got {len(sequence_state.shape)} dims.") if len(pairwise_state.shape) != 4: raise ValueError(f"`pairwise_state` should be a 4d-tensor, got {len(pairwise_state.shape)} dims.") if mask is not None and len(mask.shape) != 2: raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.") batch_dim, seq_dim, sequence_state_dim = sequence_state.shape pairwise_state_dim = pairwise_state.shape[3] if sequence_state_dim != self.config.sequence_state_dim: raise ValueError( "`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got " f"{sequence_state_dim} != {self.config.sequence_state_dim}." ) if pairwise_state_dim != self.config.pairwise_state_dim: raise ValueError( "`pairwise_state` last dimension should be equal to `self.pairwise_state_dim`. Got " f"{pairwise_state_dim} != {self.config.pairwise_state_dim}." ) if batch_dim != pairwise_state.shape[0]: raise ValueError( f"`sequence_state` and `pairwise_state` have inconsistent batch size: {batch_dim} != " f"{pairwise_state.shape[0]}." ) if seq_dim != pairwise_state.shape[1] or seq_dim != pairwise_state.shape[2]: raise ValueError( f"`sequence_state` and `pairwise_state` have inconsistent sequence length: {seq_dim} != " f"{pairwise_state.shape[1]} or {pairwise_state.shape[2]}." ) # Update sequence state bias = self.pair_to_sequence(pairwise_state) # Self attention with bias + mlp. y = self.layernorm_1(sequence_state) y, _ = self.seq_attention(y, mask=mask, bias=bias) sequence_state = sequence_state + self.drop(y) sequence_state = self.mlp_seq(sequence_state) # Update pairwise state pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state) # Axial attention with triangular bias. tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None pairwise_state = pairwise_state + self.row_drop(self.tri_mul_out(pairwise_state, mask=tri_mask)) pairwise_state = pairwise_state + self.col_drop(self.tri_mul_in(pairwise_state, mask=tri_mask)) pairwise_state = pairwise_state + self.row_drop( self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size) ) pairwise_state = pairwise_state + self.col_drop( self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size) ) # MLP over pairs. pairwise_state = self.mlp_pair(pairwise_state) return sequence_state, pairwise_state class EsmCategoricalMixture: def __init__(self, param, bins=50, start=0, end=1): # All tensors are of shape ..., bins. self.logits = param bins = torch.linspace(start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype) self.v_bins = (bins[:-1] + bins[1:]) / 2 def log_prob(self, true): # Shapes are: # self.probs: ... x bins # true : ... true_index = (true.unsqueeze(-1) - self.v_bins[[None] * true.ndim]).abs().argmin(-1) nll = self.logits.log_softmax(-1) return torch.take_along_dim(nll, true_index.unsqueeze(-1), dim=-1).squeeze(-1) def mean(self): return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1) def categorical_lddt(logits, bins=50): # Logits are ..., 37, bins. 
return EsmCategoricalMixture(logits, bins=bins).mean() def get_axial_mask(mask): """ Helper to convert B x L mask of valid positions to axial mask used in row column attentions. Input: mask: B x L tensor of booleans Output: mask: B x L x L tensor of booleans """ if mask is None: return None if len(mask.shape) != 2: raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.") batch_dim, seq_dim = mask.shape m = mask.unsqueeze(1).expand(batch_dim, seq_dim, seq_dim) m = m.reshape(batch_dim * seq_dim, seq_dim) return m class EsmFoldRelativePosition(nn.Module): def __init__(self, config): super().__init__() self.bins = config.position_bins # Note an additional offset is used so that the 0th position # is reserved for masked pairs. self.embedding = torch.nn.Embedding(2 * self.bins + 2, config.pairwise_state_dim) def forward(self, residue_index, mask=None): """ Input: residue_index: B x L tensor of indices (dtype=torch.long) mask: B x L tensor of booleans Output: pairwise_state: B x L x L x pairwise_state_dim tensor of embeddings """ if residue_index.dtype != torch.long: raise ValueError(f"`residue_index` has dtype {residue_index.dtype}, it should be `torch.long`.") if mask is not None and residue_index.shape != mask.shape: raise ValueError( f"`residue_index` and `mask` have inconsistent shapes: {residue_index.shape} != {mask.shape}." ) diff = residue_index[:, None, :] - residue_index[:, :, None] diff = diff.clamp(-self.bins, self.bins) diff = diff + self.bins + 1 # Add 1 to adjust for padding index. if mask is not None: mask = mask[:, None, :] * mask[:, :, None] diff[mask == False] = 0 # noqa: E712 output = self.embedding(diff) return output class EsmFoldAngleResnetBlock(nn.Module): def __init__(self, config): super().__init__() self.linear_1 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="relu") self.linear_2 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class EsmFoldAngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, config): super().__init__() self.config = config self.linear_in = EsmFoldLinear(config.sequence_dim, config.resnet_dim) self.linear_initial = EsmFoldLinear(config.sequence_dim, config.resnet_dim) self.layers = nn.ModuleList() for _ in range(config.num_resnet_blocks): layer = EsmFoldAngleResnetBlock(config) self.layers.append(layer) self.linear_out = EsmFoldLinear(config.resnet_dim, config.num_angles * 2) self.relu = nn.ReLU() def forward(self, s: torch.Tensor, s_initial: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. 
# [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s**2, dim=-1, keepdim=True), min=self.config.epsilon, ) ) s = s / norm_denom return unnormalized_s, s class EsmFoldInvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__(self, config): super().__init__() self.config = config c_s = config.sequence_dim c_z = config.pairwise_dim self.hidden_dim = config.ipa_dim self.num_heads = config.num_heads_ipa self.num_qk_points = config.num_qk_points self.num_v_points = config.num_v_points # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = config.ipa_dim * config.num_heads_ipa self.linear_q = EsmFoldLinear(c_s, hc) self.linear_kv = EsmFoldLinear(c_s, 2 * hc) hpq = config.num_heads_ipa * config.num_qk_points * 3 self.linear_q_points = EsmFoldLinear(c_s, hpq) hpkv = config.num_heads_ipa * (config.num_qk_points + config.num_v_points) * 3 self.linear_kv_points = EsmFoldLinear(c_s, hpkv) self.linear_b = EsmFoldLinear(c_z, config.num_heads_ipa) self.head_weights = nn.Parameter(torch.zeros((config.num_heads_ipa))) concat_out_dim = config.num_heads_ipa * (c_z + config.ipa_dim + config.num_v_points * 4) self.linear_out = EsmFoldLinear(concat_out_dim, c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.num_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.num_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.hidden_dim, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3)) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if 
_offload_inference: assert sys.getrefcount(z[0]) == 2 z[0] = z[0].cpu() # [*, H, N_res, N_res] if is_fp16_enabled(): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.hidden_dim)) a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1)) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) pt_att = pt_att**2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view(*((1,) * len(pt_att.shape[:-2]) + (-1, 1))) head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2))) pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.config.inf * (square_mask - 1) # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] o_pt = torch.sum( (a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :]), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt**2, dim=-1) + self.config.epsilon), 2) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if _offload_inference: z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype) ) return s class EsmFoldBackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, config): super().__init__() self.linear = EsmFoldLinear(config.sequence_dim, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class EsmFoldStructureModuleTransitionLayer(nn.Module): def __init__(self, config): super().__init__() self.linear_1 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu") self.linear_2 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu") self.linear_3 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class EsmFoldStructureModuleTransition(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() for _ in range(config.num_transition_layers): l = EsmFoldStructureModuleTransitionLayer(config) self.layers.append(l) self.dropout = nn.Dropout(config.dropout_rate) self.layer_norm = LayerNorm(config.sequence_dim) def forward(self, s): for l in self.layers: s = l(s) s = self.dropout(s) s = self.layer_norm(s) return s class EsmFoldStructureModule(nn.Module): def __init__(self, config): super().__init__() self.config = config # Buffers to be lazily initialized later # self.default_frames # self.group_idx # self.atom_mask # self.lit_positions self.layer_norm_s = LayerNorm(config.sequence_dim) self.layer_norm_z = LayerNorm(config.pairwise_dim) self.linear_in = EsmFoldLinear(config.sequence_dim, config.sequence_dim) self.ipa = EsmFoldInvariantPointAttention(config) self.ipa_dropout = nn.Dropout(config.dropout_rate) self.layer_norm_ipa = LayerNorm(config.sequence_dim) self.transition = EsmFoldStructureModuleTransition(config) self.bb_update = EsmFoldBackboneUpdate(config) self.angle_resnet = EsmFoldAngleResnet(config) def forward( self, evoformer_output_dict, aatype, mask=None, _offload_inference=False, ): """ Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs """ s = evoformer_output_dict["single"] if mask is None: # [*, N] mask = s.new_ones(s.shape[:-1]) # [*, N, C_s] s = self.layer_norm_s(s) # [*, N, N, C_z] z = self.layer_norm_z(evoformer_output_dict["pair"]) z_reference_list = None if _offload_inference: assert sys.getrefcount(evoformer_output_dict["pair"]) == 2 evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu() z_reference_list = [z] z = None # [*, N, C_s] s_initial = s s = self.linear_in(s) # [*, N] rigids = Rigid.identity( s.shape[:-1], s.dtype, s.device, self.training, fmt="quat", ) outputs = [] for i in range(self.config.num_blocks): # [*, N, C_s] s = s + self.ipa( s, z, rigids, mask, _offload_inference=_offload_inference, _z_reference_list=z_reference_list, ) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) # [*, N] rigids = rigids.compose_q_update_vec(self.bb_update(s)) # To hew as closely as possible to AlphaFold, we convert our # quaternion-based transformations to rotation-matrix ones # here backb_to_global = Rigid( Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None), rigids.get_trans(), ) backb_to_global = 
backb_to_global.scale_translation(self.config.trans_scale_factor) # [*, N, 7, 2] unnormalized_angles, angles = self.angle_resnet(s, s_initial) all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype) pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype) scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor) preds = { "frames": scaled_rigids.to_tensor_7(), "sidechain_frames": all_frames_to_global.to_tensor_4x4(), "unnormalized_angles": unnormalized_angles, "angles": angles, "positions": pred_xyz, "states": s, } outputs.append(preds) rigids = rigids.stop_rot_gradient() del z, z_reference_list if _offload_inference: evoformer_output_dict["pair"] = evoformer_output_dict["pair"].to(s.device) outputs = dict_multimap(torch.stack, outputs) outputs["single"] = s return outputs def _init_residue_constants(self, float_dtype, device): if not hasattr(self, "default_frames"): self.register_buffer( "default_frames", torch.tensor( residue_constants.restype_rigid_group_default_frame, dtype=float_dtype, device=device, requires_grad=False, ), persistent=False, ) if not hasattr(self, "group_idx"): self.register_buffer( "group_idx", torch.tensor( residue_constants.restype_atom14_to_rigid_group, device=device, requires_grad=False, ), persistent=False, ) if not hasattr(self, "atom_mask"): self.register_buffer( "atom_mask", torch.tensor( residue_constants.restype_atom14_mask, dtype=float_dtype, device=device, requires_grad=False, ), persistent=False, ) if not hasattr(self, "lit_positions"): self.register_buffer( "lit_positions", torch.tensor( residue_constants.restype_atom14_rigid_group_positions, dtype=float_dtype, device=device, requires_grad=False, ), persistent=False, ) def torsion_angles_to_frames(self, r, alpha, f): # Lazily initialize the residue constants on the correct device self._init_residue_constants(alpha.dtype, alpha.device) # Separated purely to make testing less annoying return torsion_angles_to_frames(r, alpha, f, self.default_frames) def frames_and_literature_positions_to_atom14_pos(self, r, f): # [*, N, 8] # [*, N] # Lazily initialize the residue constants on the correct device self._init_residue_constants(r.get_rots().dtype, r.get_rots().device) return frames_and_literature_positions_to_atom14_pos( r, f, self.default_frames, self.group_idx, self.atom_mask, self.lit_positions, ) class EsmFoldingTrunk(nn.Module): def __init__(self, config): super().__init__() self.config = config c_s = config.sequence_state_dim c_z = config.pairwise_state_dim self.pairwise_positional_embedding = EsmFoldRelativePosition(config) self.blocks = nn.ModuleList([EsmFoldTriangularSelfAttentionBlock(config) for _ in range(config.num_blocks)]) self.recycle_bins = 15 self.recycle_s_norm = nn.LayerNorm(c_s) self.recycle_z_norm = nn.LayerNorm(c_z) self.recycle_disto = nn.Embedding(self.recycle_bins, c_z) self.recycle_disto.weight[0].detach().zero_() self.structure_module = EsmFoldStructureModule(config.structure_module) self.trunk2sm_s = nn.Linear(c_s, config.structure_module.sequence_dim) self.trunk2sm_z = nn.Linear(c_z, config.structure_module.pairwise_dim) self.chunk_size = config.chunk_size def set_chunk_size(self, chunk_size): # This parameter means the axial attention will be computed # in a chunked manner. This should make the memory used more or less O(L) instead of O(L^2). 
# It's equivalent to running a for loop over chunks of the dimension we're iterative over, # where the chunk_size is the size of the chunks, so 128 would mean to parse 128-length chunks. self.chunk_size = chunk_size def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles): """ Inputs: seq_feats: B x L x C tensor of sequence features pair_feats: B x L x L x C tensor of pair features residx: B x L long tensor giving the position in the sequence mask: B x L boolean tensor indicating valid residues Output: predicted_structure: B x L x (num_atoms_per_residue * 3) tensor wrapped in a Coordinates object """ device = seq_feats.device s_s_0 = seq_feats s_z_0 = pair_feats if no_recycles is None: no_recycles = self.config.max_recycles else: if no_recycles < 0: raise ValueError("Number of recycles must not be negative.") no_recycles += 1 # First 'recycle' is just the standard forward pass through the model. def trunk_iter(s, z, residx, mask): z = z + self.pairwise_positional_embedding(residx, mask=mask) for block in self.blocks: s, z = block(s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size) return s, z s_s = s_s_0 s_z = s_z_0 recycle_s = torch.zeros_like(s_s) recycle_z = torch.zeros_like(s_z) recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64) for recycle_idx in range(no_recycles): with ContextManagers([] if recycle_idx == no_recycles - 1 else [torch.no_grad()]): # === Recycling === recycle_s = self.recycle_s_norm(recycle_s.detach()).to(device) recycle_z = self.recycle_z_norm(recycle_z.detach()).to(device) recycle_z += self.recycle_disto(recycle_bins.detach()).to(device) s_s, s_z = trunk_iter(s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask) # === Structure module === structure = self.structure_module( {"single": self.trunk2sm_s(s_s), "pair": self.trunk2sm_z(s_z)}, true_aa, mask.float(), ) recycle_s = s_s recycle_z = s_z # Distogram needs the N, CA, C coordinates, and bin constants same as alphafold. recycle_bins = EsmFoldingTrunk.distogram( structure["positions"][-1][:, :, :3], 3.375, 21.375, self.recycle_bins, ) structure["s_s"] = s_s structure["s_z"] = s_z return structure @staticmethod def distogram(coords, min_bin, max_bin, num_bins): # Coords are [... L x 3 x 3], where it's [N, CA, C] x 3 coordinates. boundaries = torch.linspace( min_bin, max_bin, num_bins - 1, device=coords.device, ) boundaries = boundaries**2 N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)] # Infer CB coordinates. b = CA - N c = C - CA a = b.cross(c, dim=-1) CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdims=True) bins = torch.sum(dists > boundaries, dim=-1) # [..., L, L] return bins # TODO Add information to the docstring about any methods that convert to PDB format, or otherwise prepare # the outputs for downstream use. @add_start_docstrings( """ ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to the rest of the model combined! It outputs a dictionary containing predicted structural information about the input protein(s). 
""", ESM_START_DOCSTRING, ) class EsmForProteinFolding(EsmPreTrainedModel): _no_split_modules = ["EsmFoldStructureModule", "EsmFoldTriangularSelfAttentionBlock"] def __init__(self, config): super().__init__(config) self.config = config self.distogram_bins = 64 self.esm = EsmModel(config, add_pooling_layer=False) self.esm.requires_grad_(False) if self.config.esmfold_config.fp16_esm: self.esm.half() self.esm_feats = self.config.hidden_size self.esm_attns = self.config.num_hidden_layers * self.config.num_attention_heads self.esm_layers = self.config.num_hidden_layers self.register_buffer("af2_to_esm", self._af2_to_esm_from_vocab_list(config.vocab_list)) self.esm_s_combine = nn.Parameter(torch.zeros(self.esm_layers + 1)) trunk_config = self.config.esmfold_config.trunk c_s = trunk_config.sequence_state_dim c_z = trunk_config.pairwise_state_dim self.esm_s_mlp = nn.Sequential( LayerNorm(self.esm_feats), nn.Linear(self.esm_feats, c_s), nn.ReLU(), nn.Linear(c_s, c_s), ) # 0 is padding, N is unknown residues, N + 1 is mask. self.n_tokens_embed = residue_constants.restype_num + 3 self.pad_idx = 0 self.unk_idx = self.n_tokens_embed - 2 self.mask_idx = self.n_tokens_embed - 1 self.esm_dict_cls_idx = self.config.vocab_list.index("<cls>") self.esm_dict_mask_idx = self.config.vocab_list.index("<mask>") self.esm_dict_eos_idx = self.config.vocab_list.index("<eos>") self.esm_dict_padding_idx = self.config.vocab_list.index("<pad>") if self.config.esmfold_config.embed_aa: self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0) self.trunk = EsmFoldingTrunk(trunk_config) self.distogram_head = nn.Linear(c_z, self.distogram_bins) self.ptm_head = nn.Linear(c_z, self.distogram_bins) self.lm_head = nn.Linear(c_s, self.n_tokens_embed) self.lddt_bins = 50 structure_module_config = trunk_config.structure_module self.lddt_head = nn.Sequential( nn.LayerNorm(structure_module_config.sequence_dim), nn.Linear(structure_module_config.sequence_dim, self.config.esmfold_config.lddt_head_hid_dim), nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, self.config.esmfold_config.lddt_head_hid_dim), nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, 37 * self.lddt_bins), ) @staticmethod def _af2_to_esm_from_vocab_list(vocab_list: List[str]) -> torch.Tensor: # Remember that t is shifted from residue_constants by 1 (0 is padding). 
esm_reorder = [vocab_list.index("<pad>")] + [vocab_list.index(v) for v in residue_constants.restypes_with_x] return torch.tensor(esm_reorder) @add_start_docstrings_to_model_forward(ESMFOLD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=EsmForProteinFoldingOutput, config_class=EsmConfig) def forward( self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, masking_pattern: Optional[torch.Tensor] = None, num_recycles: Optional[int] = None, ) -> EsmForProteinFoldingOutput: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, EsmForProteinFolding >>> model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1") >>> inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False) # A tiny random peptide >>> outputs = model(**inputs) >>> folded_positions = outputs.positions ``` """ cfg = self.config.esmfold_config aa = input_ids # B x L B = aa.shape[0] L = aa.shape[1] device = input_ids.device if attention_mask is None: attention_mask = torch.ones_like(aa, device=device) if position_ids is None: position_ids = torch.arange(L, device=device).expand_as(input_ids) # === ESM === esmaa = self.af2_idx_to_esm_idx(aa, attention_mask) if masking_pattern is not None: masked_aa, esmaa, mlm_targets = self.bert_mask(aa, esmaa, attention_mask, masking_pattern) else: masked_aa = aa mlm_targets = None # We get sequence and pair representations from whatever version of ESM / # configuration we are using. The sequence representation esm_s is always # present. The pair embedding esm_z may be present depending on the # configuration of the model. If esm_z is not used by the model then it # is returned as None here. esm_s = self.compute_language_model_representations(esmaa) # Convert esm_s and esm_z, if present, to the precision used by the trunk and # the structure module. These tensors may be a lower precision if, for example, # we're running the language model in fp16 precision. esm_s = esm_s.to(self.esm_s_combine.dtype) if cfg.esm_ablate_sequence: esm_s = esm_s * 0 esm_s = esm_s.detach() # === preprocessing === esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2) s_s_0 = self.esm_s_mlp(esm_s) s_z_0 = s_s_0.new_zeros(B, L, L, cfg.trunk.pairwise_state_dim) if self.config.esmfold_config.embed_aa: s_s_0 += self.embedding(masked_aa) structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles) # Documenting what we expect: structure = { k: v for k, v in structure.items() if k in [ "s_z", "s_s", "frames", "sidechain_frames", "unnormalized_angles", "angles", "positions", "states", ] } # Add BERT mask for the loss to use, if available. if mlm_targets: structure["mlm_targets"] = mlm_targets disto_logits = self.distogram_head(structure["s_z"]) disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2 structure["distogram_logits"] = disto_logits lm_logits = self.lm_head(structure["s_s"]) structure["lm_logits"] = lm_logits structure["aatype"] = aa make_atom14_masks(structure) # Of course, this doesn't respect the true mask because it doesn't know about it... 
# We're not going to properly mask change of index tensors: # "residx_atom14_to_atom37", # "residx_atom37_to_atom14", for k in [ "atom14_atom_exists", "atom37_atom_exists", ]: structure[k] *= attention_mask.unsqueeze(-1) structure["residue_index"] = position_ids lddt_head = self.lddt_head(structure["states"]).reshape(structure["states"].shape[0], B, L, -1, self.lddt_bins) structure["lddt_head"] = lddt_head plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins) structure["plddt"] = plddt ptm_logits = self.ptm_head(structure["s_z"]) structure["ptm_logits"] = ptm_logits structure["ptm"] = compute_tm(ptm_logits, max_bin=31, no_bins=self.distogram_bins) structure.update(compute_predicted_aligned_error(ptm_logits, max_bin=31, no_bins=self.distogram_bins)) return EsmForProteinFoldingOutput(**structure) def af2_idx_to_esm_idx(self, aa, mask): # avoid indexing on different devices if self.af2_to_esm.device != aa.device: self.af2_to_esm = self.af2_to_esm.to(aa.device) aa = (aa + 1).masked_fill(mask != 1, 0) return self.af2_to_esm[aa] def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor: device = next(self.parameters()).device B, L = esmaa.shape # B = batch size, L = sequence length. if self.config.esmfold_config.bypass_lm: esm_s = torch.zeros(B, L, self.esm_s_combine.size[0], -1, self.esm_feats, device=device) return esm_s bosi, eosi = self.esm_dict_cls_idx, self.esm_dict_eos_idx bos = esmaa.new_full((B, 1), bosi) eos = esmaa.new_full((B, 1), self.esm_dict_padding_idx) esmaa = torch.cat([bos, esmaa, eos], dim=1) # Use the first padding index as eos during inference. esmaa[range(B), (esmaa != 1).sum(1)] = eosi # _, esm_z, esm_s = self.esm(esmaa, return_pairs=self.config.esmfold_config.use_esm_attn_map) # Because we do not support use_esm_attn_map in the HF port as it is not used in any public models, # esm_z is always None esm_hidden_states = self.esm(esmaa, attention_mask=esmaa != 1, output_hidden_states=True)["hidden_states"] esm_s = torch.stack(esm_hidden_states, dim=2) esm_s = esm_s[:, 1:-1] # B, L, nLayers, C return esm_s def bert_mask(self, aa, esmaa, mask, pattern): new_aa = aa.clone() target = aa.clone() new_esmaa = esmaa.clone() new_aa[pattern == 1] = self.mask_idx target[pattern != 1] = 0 new_esmaa[pattern == 1] = self.esm_dict_mask_idx return new_aa, new_esmaa, target @torch.no_grad() def infer( self, seqs: Union[str, List[str]], position_ids=None, ): if isinstance(seqs, str): lst = [seqs] else: lst = seqs # Returns the raw outputs of the model given an input sequence. 
device = next(self.parameters()).device aatype = collate_dense_tensors( [ torch.from_numpy( residue_constants.sequence_to_onehot( sequence=seq, mapping=residue_constants.restype_order_with_x, map_unknown_to_x=True, ) ) .to(device) .argmax(dim=1) for seq in lst ] ) # B=1 x L mask = collate_dense_tensors([aatype.new_ones(len(seq)) for seq in lst]) position_ids = ( torch.arange(aatype.shape[1], device=device).expand(len(lst), -1) if position_ids is None else position_ids.to(device) ) if position_ids.ndim == 1: position_ids = position_ids.unsqueeze(0) return self.forward( aatype, mask, position_ids=position_ids, ) @staticmethod def output_to_pdb(output: Dict) -> List[str]: """Returns the pbd (file) string from the model given the model output.""" output = {k: v.to("cpu").numpy() for k, v in output.items()} pdbs = [] final_atom_positions = atom14_to_atom37(output["positions"][-1], output) final_atom_mask = output["atom37_atom_exists"] for i in range(output["aatype"].shape[0]): aa = output["aatype"][i] pred_pos = final_atom_positions[i] mask = final_atom_mask[i] resid = output["residue_index"][i] + 1 pred = OFProtein( aatype=aa, atom_positions=pred_pos, atom_mask=mask, residue_index=resid, b_factors=output["plddt"][i], ) pdbs.append(to_pdb(pred)) return pdbs def infer_pdb(self, seqs, *args, **kwargs) -> str: """Returns the pdb (file) string from the model given an input sequence.""" assert isinstance(seqs, str) output = self.infer(seqs, *args, **kwargs) return self.output_to_pdb(output)[0] def infer_pdbs(self, seqs: List[str], *args, **kwargs) -> List[str]: """Returns the pdb (file) string from the model given an input sequence.""" output = self.infer(seqs, *args, **kwargs) return self.output_to_pdb(output) __all__ = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"]
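For illustration only (not part of the original file): a minimal sketch of how the convenience helpers defined above (`infer_pdb` / `output_to_pdb`) might be used to fold one sequence and write a PDB file. It assumes the public `facebook/esmfold_v1` checkpoint mentioned in the `forward()` docstring; the toy sequence and output filename are arbitrary.

```python
from transformers import EsmForProteinFolding

# Load the public ESMFold checkpoint referenced in the forward() example above.
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
model.eval()

# infer_pdb() accepts a single amino-acid sequence string and returns one PDB-format string.
# infer() already runs under torch.no_grad(), so no extra context manager is needed here.
pdb_string = model.infer_pdb("MLKNVQVQLV")

# Write the predicted structure to disk (the filename is arbitrary).
with open("predicted_structure.pdb", "w") as f:
    f.write(pdb_string)
```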
transformers/src/transformers/models/esm/modeling_esmfold.py/0
{ "file_path": "transformers/src/transformers/models/esm/modeling_esmfold.py", "repo_id": "transformers", "token_count": 42486 }
# coding=utf-8 # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """FLAVA model configurations""" from typing import Any, Dict from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FlavaImageConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. mask_token (`bool`, *optional*, defaults to `True`): Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA. vocab_size (`int`, *optional*, defaults to 8192): Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked Image Modeling) loss for FLAVA. 
Example: ```python >>> from transformers import FlavaImageConfig, FlavaImageModel >>> # Initializing a FlavaImageModel with style configuration >>> configuration = FlavaImageConfig() >>> # Initializing a FlavaImageModel model (with random weights) from the style configuration >>> model = FlavaImageModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "flava_image_model" base_config_key = "image_config" def __init__( self, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: int = "gelu", hidden_dropout_prob: float = 0.0, attention_probs_dropout_prob: float = 0.0, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, patch_size: int = 16, num_channels: int = 3, qkv_bias: bool = True, mask_token: bool = True, vocab_size: int = 8192, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.mask_token = mask_token self.vocab_size = vocab_size class FlavaTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`FlavaTextModel`]. type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is used similar to RoBERTa. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. 
num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. Example: ```python >>> from transformers import FlavaTextConfig, FlavaTextModel >>> # Initializing a FlavaTextModel with style configuration >>> configuration = FlavaTextConfig() >>> # Initializing a FlavaTextModel model (with random weights) from the style configuration >>> model = FlavaTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "flava_text_model" base_config_key = "text_config" def __init__( self, vocab_size: int = 30522, type_vocab_size: int = 2, max_position_embeddings: int = 512, position_embedding_type: str = "absolute", hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.0, attention_probs_dropout_prob: float = 0.0, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, pad_token_id: int = 0, qkv_bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.max_position_embeddings = max_position_embeddings self.position_embedding_type = position_embedding_type self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.pad_token_id = pad_token_id class FlavaMultimodalConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. 
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. use_cls_token (`bool`, *optional*, defaults to `True`): Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model. Example: ```python >>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel >>> # Initializing a FlavaMultimodalModel with style configuration >>> configuration = FlavaMultimodalConfig() >>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration >>> model = FlavaMultimodalModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "flava_multimodal_model" base_config_key = "multimodal_config" def __init__( self, hidden_size: int = 768, num_hidden_layers: int = 6, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: int = "gelu", hidden_dropout_prob: int = 0.0, attention_probs_dropout_prob: int = 0.0, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, qkv_bias: bool = True, use_cls_token: bool = True, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_cls_token = use_cls_token class FlavaImageCodebookConfig(PretrainedConfig): model_type = "flava_image_codebook" base_config_key = "image_codebook_config" r""" [`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_groups (`int`, *optional*, defaults to 4): Number of groups to be created. This parameter as of now doesn't affect the model and is used for some internal calculation and estimations. input_channels (`int`, *optional*, defaults to 3): Number of channels in the image to be passed. num_blocks_per_group (`int`, *optional*, defaults to 2): Number of conv-based blocks per group. hidden_size (`int`, *optional*, defaults to 256): Size of hidden dim for the blocks. vocab_size (`int`, *optional*, defaults to 8192): Size of the output vocabulary for the codebook. freeze (`bool`, defaults to `True`): Whether to freeze the weights of the model. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook >>> # Initializing a FlavaImageCodebook with style configuration >>> configuration = FlavaImageCodebookConfig() >>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration >>> model = FlavaImageCodebook(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ def __init__( self, num_groups: int = 4, input_channels: int = 3, num_blocks_per_group: int = 2, hidden_size: int = 256, vocab_size: int = 8192, freeze: int = True, initializer_range: float = 0.02, **kwargs, ): super().__init__(**kwargs) self.num_groups = num_groups self.input_channels = input_channels self.num_blocks_per_group = num_blocks_per_group self.hidden_size = hidden_size self.vocab_size = vocab_size self.freeze = freeze self.initializer_range = initializer_range class FlavaConfig(PretrainedConfig): r""" [`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to instantiate FLAVA model according to the specified arguments, defining the text model, image model, image codebook and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`FlavaTextConfig`]. image_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`FlavaImageConfig`]. multimodal_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and image projection layers. 
logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP implementation. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. ce_ignore_index (`int`, *optional*, defaults to -100): Cross entropy index to ignore. mim_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MIM (Masked Image Modeling) unimodal loss mlm_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MLM (Masked Language Modeling) unimodal loss global_contrastive_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to global contrastive cross-alignment loss. itm_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to image-text matching multimodal loss. mmm_image_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MMM loss's image part. mmm_text_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MMM loss's text part. global_backprop_contrastive (`bool`, *optional*, defaults to `True`): Whether to use global backpropgation through all workers in contrastive loss. skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`): Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses. return_loss (`bool`, *optional*, defaults to `True`): Whether to return loss or not kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining >>> # Initializing a FlavaConfig with style configuration >>> configuration = FlavaConfig() >>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration >>> model = FlavaModel(configuration) >>> model_pre = FlavaForPreTraining(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> configuration_pre = model_pre.config ``` """ model_type = "flava" sub_configs = { "text_config": FlavaTextConfig, "image_config": FlavaImageConfig, "multimodal_config": FlavaMultimodalConfig, "image_codebook_config": FlavaImageCodebookConfig, } def __init__( self, image_config: Dict[str, Any] = None, text_config: Dict[str, Any] = None, multimodal_config: Dict[str, Any] = None, image_codebook_config: Dict[str, Any] = None, hidden_size: int = 768, layer_norm_eps: float = 1e-12, projection_dim: int = 768, init_codebook: bool = True, logit_scale_init_value: float = 2.6592, initializer_range: float = 0.02, ce_ignore_index: int = -100, mim_weight: float = 1.0, mlm_weight: float = 1.0, global_contrastive_weight: float = 1.0, itm_weight: float = 1.0, mmm_image_weight: float = 1.0, mmm_text_weight: float = 1.0, global_backprop_contrastive: bool = True, skip_unmasked_multimodal_encoder: bool = True, return_loss: bool = True, **kwargs, ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). 
text_config_dict = kwargs.pop("text_config_dict", None) image_config_dict = kwargs.pop("image_config_dict", None) multimodal_config_dict = kwargs.pop("multimodal_config_dict", None) image_codebook_config_dict = kwargs.pop("image_codebook_config_dict", None) super().__init__(**kwargs) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: text_config = {} # This is the complete result when using `text_config_dict`. _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: message = ( f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " f'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The " f'value `text_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict) if image_config_dict is not None: if image_config is None: image_config = {} # This is the complete result when using `image_config_dict`. _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict() # convert keys to string instead of integer if "id2label" in _image_config_dict: _image_config_dict["id2label"] = { str(key): value for key, value in _image_config_dict["id2label"].items() } # Give a warning if the values exist in both `_image_config_dict` and `image_config` but being different. for key, value in _image_config_dict.items(): if key in image_config and value != image_config[key] and key not in ["transformers_version"]: # If specified in `image_config_dict` if key in image_config_dict: message = ( f"`{key}` is found in both `image_config_dict` and `image_config` but with different " f'values. The value `image_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. " f'The value `image_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `image_config` with the ones in `_image_config_dict`. image_config.update(_image_config_dict) if multimodal_config_dict is not None: if multimodal_config is None: multimodal_config = {} # This is the complete result when using `multimodal_config_dict`. _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict() # Give a warning if the values exist in both `_multimodal_config_dict` and `multimodal_config` but being # different. 
for key, value in _multimodal_config_dict.items(): if ( key in multimodal_config and value != multimodal_config[key] and key not in ["transformers_version"] ): # If specified in `multimodal_config_dict` if key in multimodal_config_dict: message = ( f"`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with " f'different values. The value `multimodal_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`multimodal_config_dict` is provided which will be used to initialize " f'`FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `multimodal_config` with the ones in `_multimodal_config_dict`. multimodal_config.update(_multimodal_config_dict) if image_codebook_config_dict is not None: if image_codebook_config is None: image_codebook_config = {} # This is the complete result when using `image_codebook_config_dict`. _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict() # Give a warning if the values exist in both `_image_codebook_config_dict` and `image_codebook_config` but # being different. for key, value in _image_codebook_config_dict.items(): if ( key in image_codebook_config and value != image_codebook_config[key] and key not in ["transformers_version"] ): # If specified in `image_codebook_config_dict` if key in image_codebook_config_dict: message = ( f"`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but " f'with different values. The value `image_codebook_config_dict["{key}"]` will be used ' "instead." ) # If inferred from default argument values (just to be super careful) else: message = ( f"`image_codebook_config_dict` is provided which will be used to initialize " f'`FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `image_codebook_config` with the ones in `_image_codebook_config_dict`. image_codebook_config.update(_image_codebook_config_dict) if image_config is None: image_config = {} logger.info("`image_config` is `None`. initializing the `FlavaImageConfig` with default values.") if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.") if multimodal_config is None: multimodal_config = {} logger.info("`multimodal_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.") if image_codebook_config is None: image_codebook_config = {} logger.info( "`image_codebook_config` is `None`. initializing the `FlavaImageCodebookConfig` with default values." 
) self.image_config = FlavaImageConfig(**image_config) self.text_config = FlavaTextConfig(**text_config) self.multimodal_config = FlavaMultimodalConfig(**multimodal_config) self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config) self.projection_dim = projection_dim self.init_codebook = init_codebook self.hidden_size = hidden_size self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.ce_ignore_index = ce_ignore_index self.mim_weight = mim_weight self.mlm_weight = mlm_weight self.global_contrastive_weight = global_contrastive_weight self.itm_weight = itm_weight self.mmm_image_weight = mmm_image_weight self.mmm_text_weight = mmm_text_weight self.global_backprop_contrastive = global_backprop_contrastive self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder self.return_loss = return_loss @classmethod def from_configs( cls, image_config: FlavaImageConfig, text_config: FlavaTextConfig, multimodal_config: FlavaMultimodalConfig, image_codebook_config: FlavaImageCodebookConfig, **kwargs, ): r""" Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model configuration, flava multimodal model and flava codebook model configuration. Returns: [`FlavaConfig`]: An instance of a configuration object """ return cls( image_config=image_config.to_dict(), text_config=text_config.to_dict(), multimodal_config=multimodal_config.to_dict(), image_codebook_config=image_codebook_config.to_dict(), **kwargs, ) __all__ = ["FlavaConfig", "FlavaImageCodebookConfig", "FlavaImageConfig", "FlavaMultimodalConfig", "FlavaTextConfig"]
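For illustration only (not part of the original file): a minimal sketch of composing a full `FlavaConfig` from the four sub-configurations via the `from_configs` classmethod defined above. All values are library defaults; the `max_position_embeddings` override on the text config is just an arbitrary example of passing a documented argument.

```python
from transformers import (
    FlavaConfig,
    FlavaImageCodebookConfig,
    FlavaImageConfig,
    FlavaMultimodalConfig,
    FlavaTextConfig,
)

# Instantiate each sub-configuration; any argument documented above can be overridden.
image_config = FlavaImageConfig()
text_config = FlavaTextConfig(max_position_embeddings=512)
multimodal_config = FlavaMultimodalConfig()
image_codebook_config = FlavaImageCodebookConfig()

# Compose the full model configuration from the sub-configs.
config = FlavaConfig.from_configs(
    image_config=image_config,
    text_config=text_config,
    multimodal_config=multimodal_config,
    image_codebook_config=image_codebook_config,
)
```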
transformers/src/transformers/models/flava/configuration_flava.py/0
{ "file_path": "transformers/src/transformers/models/flava/configuration_flava.py", "repo_id": "transformers", "token_count": 13784 }
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch FocalNet model.""" import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_focalnet import FocalNetConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "FocalNetConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/focalnet-tiny" _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "microsoft/focalnet-tiny" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" @dataclass class FocalNetEncoderOutput(ModelOutput): """ FocalNet encoder's outputs, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class FocalNetModelOutput(ModelOutput): """ FocalNet model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. 
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class FocalNetMaskedImageModelingOutput(ModelOutput):
    """
    FocalNet masked image model outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
            Masked image modeling (MIM) loss.
        reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Reconstructed pixel values.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    reconstruction: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class FocalNetImageClassifierOutput(ModelOutput):
    """
    FocalNet outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class FocalNetEmbeddings(nn.Module): """ Construct the patch embeddings and layernorm. Optionally, also the mask token. """ def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = FocalNetPatchEmbeddings( config=config, image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.embed_dim, use_conv_embed=config.use_conv_embed, is_stem=True, ) self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None ) -> Tuple[torch.Tensor]: embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask embeddings = self.dropout(embeddings) return embeddings, output_dimensions class FocalNetPatchEmbeddings(nn.Module): def __init__( self, config, image_size, patch_size, num_channels, embed_dim, add_norm=False, use_conv_embed=False, is_stem=False, ): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) if use_conv_embed: # if we choose to use conv embedding, then we treat the stem and non-stem differently if is_stem: kernel_size = 7 padding = 2 stride = 4 else: kernel_size = 3 padding = 1 stride = 2 self.projection = nn.Conv2d( num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding ) else: self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) if add_norm: self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) else: self.norm = None def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, 
pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) if self.norm is not None: embeddings = self.norm(embeddings) return embeddings, output_dimensions # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->FocalNet class FocalNetDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class FocalNetModulation(nn.Module): def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0): super().__init__() self.dim = dim self.focal_window = config.focal_windows[index] self.focal_level = config.focal_levels[index] self.focal_factor = focal_factor self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation self.normalize_modulator = config.normalize_modulator self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias) self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias) self.activation = nn.GELU() self.projection_out = nn.Linear(dim, dim) self.projection_dropout = nn.Dropout(projection_dropout) self.focal_layers = nn.ModuleList() self.kernel_sizes = [] for k in range(self.focal_level): kernel_size = self.focal_factor * k + self.focal_window self.focal_layers.append( nn.Sequential( nn.Conv2d( dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False ), nn.GELU(), ) ) self.kernel_sizes.append(kernel_size) if self.use_post_layernorm_in_modulation: self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps) def forward(self, hidden_state): """ Args: 
            hidden_state:
                Input features with shape of (batch_size, height, width, num_channels)
        """
        num_channels = hidden_state.shape[-1]

        # pre linear projection
        x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
        q, ctx, self.gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)

        # context aggregation
        ctx_all = 0
        for level in range(self.focal_level):
            ctx = self.focal_layers[level](ctx)
            ctx_all = ctx_all + ctx * self.gates[:, level : level + 1]
        ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
        ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level :]

        # normalize context
        if self.normalize_modulator:
            ctx_all = ctx_all / (self.focal_level + 1)

        # focal modulation
        self.modulator = self.projection_context(ctx_all)
        x_out = q * self.modulator
        x_out = x_out.permute(0, 2, 3, 1).contiguous()
        if self.use_post_layernorm_in_modulation:
            x_out = self.layernorm(x_out)

        # post linear projection
        x_out = self.projection_out(x_out)
        x_out = self.projection_dropout(x_out)
        return x_out


class FocalNetMlp(nn.Module):
    def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.activation = ACT2FN[config.hidden_act]
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, hidden_state):
        hidden_state = self.fc1(hidden_state)
        hidden_state = self.activation(hidden_state)
        hidden_state = self.drop(hidden_state)
        hidden_state = self.fc2(hidden_state)
        hidden_state = self.drop(hidden_state)
        return hidden_state


class FocalNetLayer(nn.Module):
    r"""Focal Modulation Network layer (block).

    Args:
        config (`FocalNetConfig`):
            Model config.
        index (`int`):
            Layer index.
        dim (`int`):
            Number of input channels.
        input_resolution (`Tuple[int]`):
            Input resolution.
        drop_path (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate.
""" def __init__(self, config, index, dim, input_resolution, drop_path=0.0): super().__init__() self.config = config # layer-specific attributes self.dim = dim self.input_resolution = input_resolution # general attributes self.drop = config.hidden_dropout_prob self.use_post_layernorm = config.use_post_layernorm self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.modulation = FocalNetModulation( config=config, index=index, dim=dim, projection_dropout=self.drop, ) self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps) mlp_hidden_dim = int(dim * config.mlp_ratio) self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop) self.gamma_1 = 1.0 self.gamma_2 = 1.0 if config.use_layerscale: self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True) self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True) def forward(self, hidden_state, input_dimensions): height, width = input_dimensions batch_size, _, num_channels = hidden_state.shape shortcut = hidden_state # Focal Modulation hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels) hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state) # FFN hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state) hidden_state = hidden_state + self.drop_path( self.gamma_2 * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state))) ) return hidden_state class FocalNetStage(nn.Module): def __init__(self, config, index, input_resolution): super().__init__() self.config = config self.num_stages = len(config.depths) embed_dim = [config.embed_dim * (2**i) for i in range(self.num_stages)] dim = embed_dim[index] out_dim = embed_dim[index + 1] if (index < self.num_stages - 1) else None downsample = FocalNetPatchEmbeddings if (index < self.num_stages - 1) else None # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] drop_path = dpr[sum(config.depths[:index]) : sum(config.depths[: index + 1])] self.layers = nn.ModuleList( [ FocalNetLayer( config=config, index=index, dim=dim, input_resolution=input_resolution, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, ) for i in range(config.depths[index]) ] ) if downsample is not None: self.downsample = downsample( config=config, image_size=input_resolution, patch_size=2, num_channels=dim, embed_dim=out_dim, add_norm=True, use_conv_embed=config.use_conv_embed, is_stem=False, ) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int]) -> Tuple[torch.Tensor]: height, width = input_dimensions for layer_module in self.layers: hidden_states = layer_module(hidden_states, input_dimensions) hidden_states_before_downsampling = hidden_states if self.downsample is not None: height, width = input_dimensions hidden_states = hidden_states.transpose(1, 2).reshape( hidden_states_before_downsampling.shape[0], -1, height, width ) hidden_states, output_dimensions = self.downsample(hidden_states) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, 
hidden_states_before_downsampling, output_dimensions) return stage_outputs class FocalNetEncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_stages = len(config.depths) self.config = config self.stages = nn.ModuleList( [ FocalNetStage( config=config, index=i_layer, input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), ) for i_layer in range(self.num_stages) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, FocalNetEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, stage_module in enumerate(self.stages): if self.gradient_checkpointing and self.training: stage_outputs = self._gradient_checkpointing_func( stage_module.__call__, hidden_states, input_dimensions, ) else: stage_outputs = stage_module(hidden_states, input_dimensions) hidden_states = stage_outputs[0] hidden_states_before_downsampling = stage_outputs[1] output_dimensions = stage_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange b (h w) c -> b c h w # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return FocalNetEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, reshaped_hidden_states=all_reshaped_hidden_states, ) # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->FocalNet,swin->focalnet class FocalNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = FocalNetConfig base_model_prefix = "focalnet" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["FocalNetStage"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) FOCALNET_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FocalNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ FOCALNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare FocalNet Model outputting raw hidden-states without any specific head on top.", FOCALNET_START_DOCSTRING, ) class FocalNetModel(FocalNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_stages = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1)) self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token) self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=FocalNetModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, FocalNetModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) encoder_outputs = self.encoder( embedding_output, input_dimensions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return FocalNetModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @add_start_docstrings( """FocalNet Model with a decoder on top for masked image modeling. This follows the same implementation as in [SimMIM](https://arxiv.org/abs/2111.09886). <Tip> Note that we provide a script to pre-train this model on custom data in our [examples directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). </Tip> """, FOCALNET_START_DOCSTRING, ) class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True) self.num_stages = len(config.depths) num_features = int(config.embed_dim * 2 ** (self.num_stages - 1)) self.decoder = nn.Sequential( nn.Conv2d( in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1 ), nn.PixelShuffle(config.encoder_stride), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FocalNetMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, FocalNetMaskedImageModelingOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
        >>> config = FocalNetConfig()
        >>> model = FocalNetForMaskedImageModeling(config)

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 192, 192]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.focalnet(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output.transpose(1, 2)
        batch_size, num_channels, sequence_length = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return FocalNetMaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    """
    FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. for
    ImageNet.
""", FOCALNET_START_DOCSTRING, ) class FocalNetForImageClassification(FocalNetPreTrainedModel): # Copied from transformers.models.swin.modeling_swin.SwinForImageClassification.__init__ with Swin->FocalNet, swin->focalnet def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.focalnet = FocalNetModel(config) # Classifier head self.classifier = ( nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=FocalNetImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, FocalNetImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.focalnet( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return FocalNetImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, reshaped_hidden_states=outputs.reshaped_hidden_states, ) @add_start_docstrings( """ FocalNet backbone, to be used with frameworks like X-Decoder. 
""", FOCALNET_START_DOCSTRING, ) class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin): def __init__(self, config: FocalNetConfig): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embed_dim] + config.hidden_sizes self.focalnet = FocalNetModel(config) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf") >>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf") >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True) hidden_states = outputs.reshaped_hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = [ "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ]
transformers/src/transformers/models/focalnet/modeling_focalnet.py/0
{ "file_path": "transformers/src/transformers/models/focalnet/modeling_focalnet.py", "repo_id": "transformers", "token_count": 18424 }
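To tie the FocalNet classes above together, here is a hedged inference sketch that is not part of the original module. It reuses the `microsoft/focalnet-tiny` checkpoint and the COCO cat image already named in the file's docstring constants; only the surrounding glue code is new.

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, FocalNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

# FocalNetModel average-pools the final hidden states, and the linear classifier
# head maps the pooled vector to ImageNet logits.
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, num_labels)

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])  # "tabby, tabby cat" per _IMAGE_CLASS_EXPECTED_OUTPUT
```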
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Fuyu.""" import math from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( pad, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_valid_image, make_list_of_images, to_numpy_array, validate_preprocess_arguments, ) from ...utils import ( TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_device, is_torch_dtype, logging, requires_backends, ) if is_torch_available(): import torch logger = logging.get_logger(__name__) def make_list_of_list_of_images( images: Union[List[List[ImageInput]], List[ImageInput], ImageInput], ) -> List[List[ImageInput]]: if is_valid_image(images): return [[images]] if isinstance(images, list) and all(isinstance(image, list) for image in images): return images if isinstance(images, list): return [make_list_of_images(image) for image in images] raise ValueError("images must be a list of list of images or a list of images or an image.") class FuyuBatchFeature(BatchFeature): """ BatchFeature class for Fuyu image processor and processor. The outputs dictionary from the processors contains a mix of tensors and lists of tensors. """ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): """ Convert the inner content to tensors. Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. """ if tensor_type is None: return self is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type) def _convert_tensor(elem): if is_tensor(elem): return elem return as_tensor(elem) def _safe_convert_tensor(elem): try: return _convert_tensor(elem) except: # noqa E722 if key == "overflowing_values": raise ValueError("Unable to create tensor returning overflowing values of different lengths. ") raise ValueError( "Unable to create tensor, you should probably activate padding " "with 'padding=True' to have batched tensors with the same length." ) # Do the tensor conversion in batch for key, value in self.items(): if isinstance(value, list) and isinstance(value[0], list): # List[List[Any]] -> List[List[Tensor]] self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value] elif isinstance(value, list): # List[Any] -> List[Tensor] self[key] = [_safe_convert_tensor(elem) for elem in value] else: # Any -> Tensor self[key] = _safe_convert_tensor(value) return self def to(self, *args, **kwargs) -> "BatchFeature": """ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). 
This should support casting in different `dtypes` and sending the `BatchFeature` to a different `device`. Args: args (`Tuple`): Will be passed to the `to(...)` function of the tensors. kwargs (`Dict`, *optional*): Will be passed to the `to(...)` function of the tensors. Returns: [`BatchFeature`]: The same instance after modification. """ requires_backends(self, ["torch"]) import torch # noqa new_data = {} device = kwargs.get("device") # Check if the args are a device or a dtype if device is None and len(args) > 0: # device should be always the first argument arg = args[0] if is_torch_dtype(arg): # The first argument is a dtype pass elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): device = arg else: # it's something else raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.") def _to(elem): # check if v is a floating point if torch.is_floating_point(elem): # cast and send to device return elem.to(*args, **kwargs) if device is not None: return elem.to(device=device) return elem # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor` for k, v in self.items(): if isinstance(v, list) and isinstance(v[0], list): # Data structure is a list of lists new_v = [] for elems in v: new_v.append([_to(elem) for elem in elems]) new_data[k] = new_v elif isinstance(v, list): # Data structure is a list new_data[k] = [_to(elem) for elem in v] else: new_data[k] = _to(v) self.data = new_data return self class FuyuImageProcessor(BaseImageProcessor): """ This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should handle: - Processing Images: Taking a batch of images as input. If the images are variable-sized, it resizes them based on the desired patch dimensions. The image output is always img_h, img_w of (1080, 1920) Then, it patches up these images using the patchify_image function. - Creating Image Input IDs: For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For variable-sized images, each line of patches is terminated with a newline ID. - Image Patch Indices: For each image patch, the code maintains an index where these patches should be inserted in a token stream. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image to `size`. size (`Dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to `size`. padding_value (`float`, *optional*, defaults to 1.0): The value to pad the image with. padding_mode (`str`, *optional*, defaults to `"constant"`): The padding mode to use when padding the image. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float`, *optional*, defaults to 0.5): The mean to use when normalizing the image. image_std (`float`, *optional*, defaults to 0.5): The standard deviation to use when normalizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `1 / 255`): The factor to use when rescaling the image. 
patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. """ model_input_names = [ "images", "image_input_ids", "image_patches", "image_patch_indices_per_batch", "image_patch_indices_per_subsequence", ] def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_pad: bool = True, padding_value: float = 1.0, padding_mode: str = "constant", do_normalize: bool = True, image_mean: Union[float, List[float]] = 0.5, image_std: Union[float, List[float]] = 0.5, do_rescale: bool = True, rescale_factor: float = 1 / 255, patch_size: Optional[Dict[str, int]] = None, **kwargs, ): super().__init__(**kwargs) self.do_resize = do_resize self.size = size if size is not None else {"height": 1080, "width": 1920} self.resample = resample self.do_pad = do_pad self.padding_value = padding_value self.padding_mode = padding_mode self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30} def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. 
""" image_height, image_width = get_image_size(image, input_data_format) target_height, target_width = size["height"], size["width"] if image_width <= target_width and image_height <= target_height: return image height_scale_factor = target_height / image_height width_scale_factor = target_width / image_width optimal_scale_factor = min(height_scale_factor, width_scale_factor) new_height = int(image_height * optimal_scale_factor) new_width = int(image_width * optimal_scale_factor) scaled_image = resize( image=image, size=(new_height, new_width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) return scaled_image def pad_image( self, image: np.ndarray, size: Dict[str, int], mode: str = "constant", constant_values: float = 1.0, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to pad. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. data_format (`ChannelDimension` or `str`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ image_height, image_width = get_image_size(image, input_data_format) target_height, target_width = size["height"], size["width"] padding_top = 0 padding_left = 0 padding_bottom = target_height - image_height padding_right = target_width - image_width padded_image = pad( image, padding=((padding_top, padding_bottom), (padding_left, padding_right)), mode=mode, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image @filter_out_non_signature_kwargs() def preprocess( self, images, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_pad: Optional[bool] = None, padding_value: Optional[float] = None, padding_mode: Optional[str] = None, do_normalize: Optional[bool] = None, image_mean: Optional[float] = None, image_std: Optional[float] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, patch_size: Optional[Dict[str, int]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, return_tensors: Optional[TensorType] = None, ): """ Utility function to preprocess the images and extract necessary information about original formats. Args: images (`ImageInput`): Images to preprocess. Expects a single image, a list or images or a list of lists of images. Pixel values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image to `size`. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to `size`. 
padding_value (`float`, *optional*, defaults to `self.padding_value`): The value to pad the image with. padding_mode (`str`, *optional*, defaults to `self.padding_mode`): The padding mode to use when padding the image. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float`, *optional*, defaults to `self.image_mean`): The mean to use when normalizing the image. image_std (`float`, *optional*, defaults to `self.image_std`): The standard deviation to use when normalizing the image. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): The factor to use when rescaling the image. patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format of the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std padding_value = padding_value if padding_value is not None else self.padding_value padding_mode = padding_mode if padding_mode is not None else self.padding_mode do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor patch_size = patch_size if patch_size is not None else self.patch_size if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images): raise ValueError("Multiple images for a single sample are not yet supported.") batch_images = make_list_of_list_of_images(images) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg. do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. batch_images = [[to_numpy_array(image) for image in images] for images in batch_images] if do_rescale and is_scaled_image(batch_images[0][0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(batch_images[0][0]) original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] size = get_size_dict(size) # for BC if do_resize: batch_images = [ [self.resize(image, size=size, input_data_format=input_data_format) for image in images] for images in batch_images ] image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] image_unpadded_heights = [[image_size[0]] for image_size in image_sizes] image_unpadded_widths = [[image_size[1]] for image_size in image_sizes] # scale_h is the same as scale_w image_scale_factors = [ [resized_size[0] / original_size[0]] for original_size, resized_size in zip(original_image_sizes, image_sizes) ] if do_pad: batch_images = [ [ self.pad_image( image, size=size, mode=padding_mode, constant_values=padding_value, input_data_format=input_data_format, ) for image in images ] for images in batch_images ] if do_rescale: batch_images = [ [self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images] for images in batch_images ] if do_normalize: batch_images = [ [ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] for images in batch_images ] if data_format is not None: batch_images = [ [to_channel_dimension_format(image, data_format, input_data_format) for image in images] for images in batch_images ] data = { "images": batch_images, "image_unpadded_heights": image_unpadded_heights, "image_unpadded_widths": image_unpadded_widths, "image_scale_factors": image_scale_factors, } return FuyuBatchFeature(data=data, tensor_type=return_tensors) def get_num_patches(self, image_height: int, image_width: int, patch_size: Dict[str, int] = None) -> int: """ Calculate number of patches required to encode an image. Args: image_height (`int`): Height of the image. image_width (`int`): Width of the image. patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. """ patch_size = patch_size if patch_size is not None else self.patch_size patch_height, patch_width = self.patch_size["height"], self.patch_size["width"] if image_height % patch_height != 0: raise ValueError(f"{image_height=} must be divisible by {patch_height}") if image_width % patch_width != 0: raise ValueError(f"{image_width=} must be divisible by {patch_width}") num_patches_per_dim_h = image_height // patch_height num_patches_per_dim_w = image_width // patch_width num_patches = num_patches_per_dim_h * num_patches_per_dim_w return num_patches def patchify_image(self, image: "torch.Tensor", patch_size: Optional[Dict[str, int]] = None) -> "torch.Tensor": """ Convert an image into a tensor of patches. Args: image (`torch.Tensor`): Image to convert. Shape: [batch, channels, height, width] patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. 
""" requires_backends(self, ["torch"]) patch_size = patch_size if patch_size is not None else self.patch_size patch_height, patch_width = patch_size["height"], patch_size["width"] # TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871 # torch implementation is faster but does not handle non-squares batch_size, channels, _, _ = image.shape unfolded_along_height = image.unfold(2, patch_height, patch_height) patches = unfolded_along_height.unfold(3, patch_width, patch_width) patches = patches.contiguous() patches = patches.view(batch_size, channels, -1, patch_height, patch_width) patches = patches.permute(0, 2, 3, 4, 1) patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width) return patches def preprocess_with_tokenizer_info( self, image_input: "torch.Tensor", image_present: "torch.Tensor", image_unpadded_h: "torch.Tensor", image_unpadded_w: "torch.Tensor", image_placeholder_id: int, image_newline_id: int, variable_sized: bool, patch_size: Optional[Dict[str, int]] = None, ) -> FuyuBatchFeature: """Process images for model input. In particular, variable-sized images are handled here. Args: image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]): Tensor of images padded to model input size. image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]): Tensor of 1s and 0s indicating whether an image is present. image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]): Tensor of unpadded image heights. image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]): Tensor of unpadded image widths. image_placeholder_id (int): The id of the image placeholder token. Comes from an associated tokenizer. image_newline_id (int): The id of the image newline token. Comes from an associated tokenizer. variable_sized (bool): Whether to process images as variable-sized. patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Size of the patches. """ requires_backends(self, ["torch"]) patch_size = patch_size if patch_size is not None else self.patch_size patch_height, patch_width = patch_size["height"], patch_size["width"] # Only images that are present. images: List[List[torch.Tensor]] = [] batch_image_patches: List[List[torch.Tensor]] = [] # Image input ids for every subsequence, including ones with no image present. 
batch_image_input_ids: List[List[torch.Tensor]] = [] for batch_index in range(image_input.shape[0]): image_input_ids = [] image_patches = [] for subseq_index in range(image_input.shape[1]): if image_present[batch_index, subseq_index]: image = image_input[batch_index, subseq_index] image_height, image_width = image.shape[1], image.shape[2] if variable_sized: # The min() is required here due to floating point issues: # math.ceil(torch.tensor(300).cuda() / 30) == 11 new_h = min( image_height, math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height, ) new_w = min( image_width, math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width, ) image = image[:, :new_h, :new_w] image_height, image_width = new_h, new_w num_patches = self.get_num_patches(image_height=image_height, image_width=image_width) tensor_of_image_ids = torch.full( [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device ) patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0) assert num_patches == patches.shape[0] if variable_sized: # Now terminate each line with |NEWLINE|. tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width) newline_ids = torch.full( [tensor_of_image_ids.shape[0], 1], image_newline_id, dtype=torch.int32, device=image_input.device, ) tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1) tensor_of_image_ids = tensor_of_image_ids.reshape(-1) images.append([image]) image_input_ids.append(tensor_of_image_ids) image_patches.append(patches) else: image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device)) batch_image_input_ids.append(image_input_ids) batch_image_patches.append(image_patches) # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in # the stream. image_patch_indices_per_batch: List[List[torch.Tensor]] = [] image_patch_indices_per_subsequence: List[List[torch.Tensor]] = [] for sample_image_input_ids in batch_image_input_ids: index_offset = 0 per_batch_indices = [] per_subsequence_indices = [] for subseq_image_input_ids in sample_image_input_ids: # Indices of image patches. patches_mask = subseq_image_input_ids == image_placeholder_id num_patches = torch.count_nonzero(patches_mask) indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as( subseq_image_input_ids ) # Place those indices in the image input ids token stream, with -1 representing non-index tokens. indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1) indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1) patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0] indices_in_stream_per_batch[patches_inds] = indices + index_offset indices_in_stream_per_subsequence[patches_inds] = indices per_batch_indices.append(indices_in_stream_per_batch) per_subsequence_indices.append(indices_in_stream_per_subsequence) index_offset += num_patches image_patch_indices_per_batch.append(per_batch_indices) image_patch_indices_per_subsequence.append(per_subsequence_indices) return FuyuBatchFeature( data={ "images": images, "image_input_ids": batch_image_input_ids, "image_patches": batch_image_patches, "image_patch_indices_per_batch": image_patch_indices_per_batch, "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence, } ) __all__ = ["FuyuImageProcessor"]
transformers/src/transformers/models/fuyu/image_processing_fuyu.py/0
{ "file_path": "transformers/src/transformers/models/fuyu/image_processing_fuyu.py", "repo_id": "transformers", "token_count": 15075 }
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from ...modeling_tf_utils import keras from .tokenization_gpt2 import GPT2Tokenizer class TFGPT2Tokenizer(keras.layers.Layer): """ This is an in-graph tokenizer for GPT2. It should be initialized similarly to other tokenizers, using the `from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings from an existing standard tokenizer object. In-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run when the model is called, rather than during preprocessing. As a result, they have somewhat more limited options than standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes straight from `tf.string` inputs to outputs. Args: vocab (Dict[str, int]): Vocabulary dict for Byte Pair Tokenizer merges (List[str]): Merges list for Byte Pair Tokenizer """ def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None): super().__init__() self.pad_token_id = pad_token_id self.max_length = max_length self.vocab = vocab self.merges = merges self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length) @classmethod def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs): """Creates TFGPT2Tokenizer from GPT2Tokenizer Args: tokenizer (GPT2Tokenizer) Examples: ```python from transformers import AutoTokenizer, TFGPT2Tokenizer tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(tokenizer) ``` """ merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()] vocab = tokenizer.get_vocab() return cls(vocab, merges, *args, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): """Creates TFGPT2Tokenizer from pretrained GPT2Tokenizer Args: pretrained_model_name_or_path (Union[str, os.PathLike]): Path to pretrained model Examples: ```python from transformers import TFGPT2Tokenizer tf_tokenizer = TFGPT2Tokenizer.from_pretrained("openai-community/gpt2") ``` """ tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs) @classmethod def from_config(cls, config): """Creates TFGPT2Tokenizer from configurations Args: config (Dict): Dictionary with keys such as stated in `get_config`. """ return cls(**config) def get_config(self): return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def call(self, x, max_length: int = None): input_ids = self.tf_tokenizer(x) attention_mask = tf.ones_like(input_ids) if self.pad_token_id is not None: # pad the tokens up to max length max_length = max_length if max_length is not None else self.max_length if max_length is not None: input_ids, attention_mask = pad_model_inputs( input_ids, max_seq_length=max_length, pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids} __all__ = ["TFGPT2Tokenizer"]
transformers/src/transformers/models/gpt2/tokenization_gpt2_tf.py/0
{ "file_path": "transformers/src/transformers/models/gpt2/tokenization_gpt2_tf.py", "repo_id": "transformers", "token_count": 1589 }
# coding=utf-8 # Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch GPTNeoX model.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "abeja/gpt-neox-japanese-2.7b" _CONFIG_FOR_DOC = "GPTNeoXJapaneseConfig" class GPTNeoXJapanesePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPTNeoXJapaneseConfig base_model_prefix = "gpt_neox_japanese" _no_split_modules = ["GPTNeoXJapaneseLayer"] _skip_keys_device_placement = "past_key_values" _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = False # TODO (fix me): compilation fails due to a stide error? def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class GPTNeoXJapaneseAttention(nn.Module): def __init__(self, config, use_bias=False, layer_idx=None): super().__init__() self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_attention_heads if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) self.layer_idx = layer_idx self.rotary_ndims = int(self.head_size * config.rotary_pct) self.rope_theta = config.rotary_emb_base self.rotary_emb = GPTNeoXJapaneseRotaryEmbedding(config=config) self.attention_dropout = nn.Dropout(config.attention_dropout) self.norm_factor = math.sqrt(self.head_size) self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=False) self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False) # Activate bias if the last layer self.use_bias = use_bias self.dense_bias = nn.Parameter(torch.zeros(config.hidden_size)) if use_bias else None def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, head_mask: Optional[torch.FloatTensor] = None, layer_past: Optional[Cache] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ): # Compute QKV # Attention heads [batch, seq_len, hidden_size] # --> [batch, seq_len, (np * 3 * head_size)] qkv = self.query_key_value(hidden_states) # [batch, seq_len, (num_heads * 3 * head_size)] # --> [batch, seq_len, num_heads, 3 * head_size] new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) qkv = qkv.view(*new_qkv_shape) # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size] query = qkv[..., : self.head_size].permute(0, 2, 1, 3) key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3) value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3) # Compute rotary embeddings on rotary_ndims query_rot = query[..., : self.rotary_ndims] query_pass = query[..., self.rotary_ndims :] key_rot = key[..., : self.rotary_ndims] key_pass = key[..., self.rotary_ndims :] cos, sin = position_embeddings query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) query = torch.cat((query, query_pass), dim=-1) key = torch.cat((key, key_pass), dim=-1) # Cache QKV values if layer_past is not None: cache_kwargs = { "sin": sin, "cos": cos, "partial_rotation_size": self.rotary_ndims, "cache_position": cache_position, } key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs) # Compute attention attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) # Reshape outputs attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size) attn_output = self.dense(attn_output) outputs = (attn_output, layer_past) if output_attentions: outputs += (attn_weights,) return outputs, self.dense_bias @classmethod def _split_heads(cls, tensor, num_attention_heads, attn_head_size): """ Splits hidden dim into attn_head_size and num_attention_heads """ # tensor: [bs, seq_len, hidden_size] new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) # -> [bs, seq_len, num_attention_heads, attn_head_size] tensor = tensor.view(new_shape) # -> [bs, num_attention_heads, seq_len, attn_head_size] tensor = tensor.permute(0, 2, 1, 3) return tensor @classmethod def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden dim """ # tensor [bs, num_attention_heads, seq_len, attn_head_size] tensor = tensor.permute(0, 2, 1, 3).contiguous() # -> [bs, seq_len, num_attention_heads, attn_head_size] tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * 
attn_head_size) # -> [bs, seq_len, hidden_size] return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size] # compute causal mask from causal mask buffer batch_size, num_attention_heads, query_length, attn_head_size = query.size() key_length = key.size(-2) query = query.view(batch_size * num_attention_heads, query_length, attn_head_size) key = key.view(batch_size * num_attention_heads, key_length, attn_head_size) # [batch_size * num_heads, q_length, kv_length] attn_scores = torch.zeros( batch_size * num_attention_heads, query_length, key_length, dtype=query.dtype, device=key.device, ) attention_scores = torch.baddbmm( attn_scores, query, key.transpose(1, 2), beta=1.0, alpha=1.0 / self.norm_factor, ) attention_scores = attention_scores.view(batch_size, num_attention_heads, query_length, -1) if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key.shape[-2]] attention_scores = attention_scores + causal_mask attn_weights = nn.functional.softmax(attention_scores, dim=-1) attn_weights = self.attention_dropout(attn_weights) attn_weights = attn_weights.to(value.dtype) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights # Copied from transformers.models.gpt_neox.modeling_gpt_neox.GPTNeoXRotaryEmbedding with GPTNeoX->GPTNeoXJapanese class GPTNeoXJapaneseRotaryEmbedding(nn.Module): def __init__(self, config: GPTNeoXJapaneseConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset # This .to() is needed if the model has been moved to a device after being initialized (because # the buffer is automatically moved, but not the original copy) self.original_inv_freq = self.original_inv_freq.to(device) self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, 
None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def bias_dropout_add(x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool) -> Tensor: """add bias to x, apply dropout and residual connection Args: x (Tensor): main path of output bias (Tensor): None or attn_bias of the last attention layer residual (Optional[Tensor]): residual value prob (float): dropout probability training (bool): whether in training mode or not Returns: Tensor: dropout(x + bias) + residual """ if bias is not None: x = x + bias out = torch.nn.functional.dropout(x, p=prob, training=training) if residual is not None: out = residual + out return out class GPTNeoXJapaneseMLP(nn.Module): def __init__(self, config): super().__init__() intermediate_size = int(config.hidden_size * config.intermediate_multiple_size) self.dense_h_to_4h = nn.Linear(config.hidden_size, intermediate_size, bias=False) # Project back to h. 
self.dense_4h_to_h = nn.Linear(intermediate_size, config.hidden_size, bias=False) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states): intermediate = self.dense_h_to_4h(hidden_states) intermediate = self.act(intermediate) output = self.dense_4h_to_h(intermediate) return output class GPTNeoXJapaneseLayer(nn.Module): def __init__(self, config, layer_number): super().__init__() self.layer_number = layer_number self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # activate bias only last layer self.attention = GPTNeoXJapaneseAttention( config=config, use_bias=layer_number == config.num_hidden_layers - 1, layer_idx=layer_number ) self.mlp = GPTNeoXJapaneseMLP(config) self.hidden_dropout = config.hidden_dropout def forward( self, hidden_states: Optional[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, layer_past: Optional[Cache] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ): residual = hidden_states ln_out = self.input_layernorm(hidden_states) attention_layer_outputs, attn_bias = self.attention( ln_out, attention_mask=attention_mask, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, position_ids=position_ids, cache_position=cache_position, position_embeddings=position_embeddings, ) attn_output = attention_layer_outputs[0] # output_attn: a, present, (attentions) outputs = attention_layer_outputs[1:] # attn_output = (atten_output + bias) + residual attn_output = bias_dropout_add( attn_output, bias=attn_bias.expand_as(residual) if attn_bias is not None else attn_bias, residual=residual, prob=self.hidden_dropout, training=self.training, ) mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) # attn_output = (mlp_output + mlp_bias) + atten_output attn_output = bias_dropout_add( mlp_output, bias=None, residual=attn_output, prob=self.hidden_dropout, training=self.training ) if use_cache: outputs = (attn_output,) + outputs else: outputs = (attn_output,) + outputs[1:] return outputs # hidden_states, present, (attentions) GPT_NEOX_JAPANESE_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~GPTNeoXJapaneseConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPT_NEOX_JAPANESE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. Two formats are allowed: - a [`~cache_utils.Cache`] instance; - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy cache format. The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the legacy cache format will be returned. If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. 
""" @add_start_docstrings( "The bare GPTNeoXJapanese Model transformer outputting raw hidden-states without any specific head on top.", GPT_NEOX_JAPANESE_START_DOCSTRING, ) class GPTNeoXJapaneseModel(GPTNeoXJapanesePreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList( [GPTNeoXJapaneseLayer(config=config, layer_number=i) for i in range(config.num_hidden_layers)] ) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.rotary_emb = GPTNeoXJapaneseRotaryEmbedding(config=config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_in def set_input_embeddings(self, value): self.embed_in = value @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, GPTNeoXJapaneseModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> model = GPTNeoXJapaneseModel.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> inputs = tokenizer("日本語のGPT-neoxがHugging Faceで使えます😀", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_in(input_ids) # kept for BC (non `Cache` `past_key_values` inputs) return_legacy_cache = False if use_cache and not isinstance(past_key_values, Cache): return_legacy_cache = True if past_key_values is None: past_key_values = DynamicCache() else: past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once( "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and " "will be removed in v4.47. 
Please convert your cache or use an appropriate `Cache` class " "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)" ) seq_length = inputs_embeds.shape[1] if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) next_decoder_cache = None all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], layer_past=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = outputs[0] if use_cache is True: next_decoder_cache = outputs[1] if output_attentions: all_attentions = all_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.final_layer_norm(hidden_states) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_legacy_cache: next_cache = next_cache.to_legacy_cache() if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attentions] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_attentions, ) # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to plcae the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask @add_start_docstrings( """GPTNeoXJapanese Model with a `language modeling` head on top for Classifier Model fine-tuning.""", GPT_NEOX_JAPANESE_START_DOCSTRING, ) class GPTNeoXJapaneseForCausalLM(GPTNeoXJapanesePreTrainedModel, GenerationMixin): _tied_weights_keys = ["embed_out.weight"] def __init__(self, config): super().__init__(config) self.config = config self.gpt_neox_japanese = GPTNeoXJapaneseModel(config) self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.embed_out def set_output_embeddings(self, new_embeddings): self.embed_out = new_embeddings @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`. 
Returns: Example: ```python >>> from transformers import AutoTokenizer, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> config = GPTNeoXJapaneseConfig.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> config.is_decoder = True >>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b", config=config) >>> inputs = tokenizer("日本語のGPT-neoxがHugging Faceで使えます😀", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox_japanese( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = outputs[0] lm_logits = self.embed_out(hidden_states) lm_loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) lm_loss = self.loss_function( lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (lm_logits,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithPast( loss=lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past __all__ = [ "GPTNeoXJapaneseForCausalLM", "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", "GPTNeoXJapanesePreTrainedModel", ]
transformers/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py/0
{ "file_path": "transformers/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py", "repo_id": "transformers", "token_count": 17620 }
# coding=utf-8 # Copyright 2024 IBM and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import ( BaseModelOutputWithPast, MoeCausalLMOutputWithPast, MoeModelOutputWithPast, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from .configuration_granitemoe import GraniteMoeConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "GraniteMoeConfig" # Copied from transformers.models.jetmoe.modeling_jetmoe.load_balancing_loss_func def load_balancing_loss_func( gate_logits: Union[torch.Tensor, Tuple[torch.Tensor], None], num_experts: Optional[int] = None, top_k=2, attention_mask: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, int]: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: gate_logits: Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of shape [batch_size X sequence_length, num_experts]. num_experts: Number of experts top_k: The number of experts to route per-token, can be also interpreted as the `top-k` routing parameter. attention_mask (`torch.Tensor`, *optional*): The attention_mask used in forward function shape [batch_size X sequence_length] if not None. Returns: The auxiliary loss. 
""" if gate_logits is None or not isinstance(gate_logits, tuple): return 0 if isinstance(gate_logits, tuple): compute_device = gate_logits[0].device concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0) routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1) _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) if attention_mask is None: # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.mean(expert_mask.float(), dim=0) # Compute the average probability of routing to these experts router_prob_per_expert = torch.mean(routing_weights, dim=0) else: batch_size, sequence_length = attention_mask.shape num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length) # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask expert_attention_mask = ( attention_mask[None, :, :, None, None] .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)) .reshape(-1, top_k, num_experts) .to(compute_device) ) # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum( expert_attention_mask, dim=0 ) # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert router_per_expert_attention_mask = ( attention_mask[None, :, :, None] .expand((num_hidden_layers, batch_size, sequence_length, num_experts)) .reshape(-1, num_experts) .to(compute_device) ) # Compute the average probability of routing to these experts router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( router_per_expert_attention_mask, dim=0 ) overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) return overall_loss * num_experts # Copied from transformers.models.granite.modeling_granite.GraniteRMSNorm with Granite->GraniteMoe class GraniteMoeRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ GraniteMoeRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" ALL_LAYERNORM_LAYERS.append(GraniteMoeRMSNorm) # Copied from transformers.models.granite.modeling_granite.GraniteRotaryEmbedding with Granite->GraniteMoe class GraniteMoeRotaryEmbedding(nn.Module): def __init__(self, config: GraniteMoeConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = 
self.inv_freq def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset # This .to() is needed if the model has been moved to a device after being initialized (because # the buffer is automatically moved, but not the original copy) self.original_inv_freq = self.original_inv_freq.to(device) self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) # Copied from transformers.models.granite.modeling_granite.rotate_half with Granite->GraniteMoe def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.granite.modeling_granite.apply_rotary_pos_emb with Granite->GraniteMoe def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. 
Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.jetmoe.modeling_jetmoe.JetMoeParallelExperts with JetMoe->GraniteMoe class GraniteMoeParallelExperts(nn.Module): def __init__(self, num_experts: int, input_size: int, output_size: int) -> None: """ Initialize the GraniteMoeParallelExperts module. The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's comptible with many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py) used in vllm. Args: num_experts (int): Number of experts. input_size (int): Size of the input. output_size (int): Size of the output. """ super().__init__() self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size)) self.num_experts = num_experts self.input_size = input_size self.output_size = output_size def forward(self, inputs, expert_size): """ Forward pass of the GraniteMoeParallelExperts module. Args: inputs (Tensor): Input tensor. expert_size: Expert size information. Returns: Tensor: Output tensor. """ input_list = inputs.split(expert_size, dim=0) output_list = [] for i in range(self.num_experts): output_list.append(F.linear(input_list[i], self.weight[i])) results = torch.cat(output_list, dim=0) return results # Copied from transformers.models.jetmoe.modeling_jetmoe.JetMoeTopKGating with JetMoe->GraniteMoe class GraniteMoeTopKGating(nn.Module): def __init__(self, input_size: int, num_experts: int, top_k: int): """ Initialize the top-k gating mechanism. Args: input_size (`int`): Size of the input. num_experts (`int`): Number of experts. top_k (`int`): Number of top experts to select. 
""" super().__init__() self.num_experts = num_experts self.input_size = input_size self.top_k = top_k self.layer = nn.Linear(input_size, num_experts, bias=False) def forward(self, hidden_states): # compute the top_k routing decision logits = self.layer(hidden_states).float() # [batch_size x seq_len, num_experts] top_k_logits, top_k_indices = logits.topk(self.top_k, dim=1) # [num_tokens, top_k] top_k_gates = torch.softmax(top_k_logits, dim=1).type_as(hidden_states) # [num_tokens, top_k] # compute number of input given to each expert zeros = torch.zeros( [top_k_gates.size(0), self.num_experts], dtype=top_k_gates.dtype, device=top_k_gates.device ) # [num_tokens, num_experts] gates = zeros.scatter(1, top_k_indices, 1) # [num_tokens, num_experts] expert_size = gates.long().sum(0) # [num_experts,] # (This cause torch.compile to fail with `torch._dynamo.exc.Unsupported: Backend compiler failed with a fake tensor exception at`) # (and `DataDependentOutputException`) expert_size = expert_size.tolist() # sort and group input tokens according to expert assignment top_k_experts = top_k_indices.flatten() # [num_tokens * top_k] _, index_sorted_experts = top_k_experts.sort(0) # [num_tokens * top_k] batch_index = index_sorted_experts.div(self.top_k, rounding_mode="trunc") # [num_tokens * top_k] # gather the gate values for grouped input tokens top_k_gates = top_k_gates.flatten() # [num_tokens * top_k] batch_gates = top_k_gates[index_sorted_experts] # [num_tokens * top_k] return index_sorted_experts, batch_index, batch_gates, expert_size, logits class GraniteMoeMoE(nn.Module): """ A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts. Args: config: Configuration object with model hyperparameters. """ def __init__(self, config: GraniteMoeConfig): super(GraniteMoeMoE, self).__init__() self.input_size = config.hidden_size self.hidden_size = config.intermediate_size self.activation = ACT2FN[config.hidden_act] self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2) self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size) self.router = GraniteMoeTopKGating( input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok, ) def forward(self, layer_input): """ Forward pass of the mixture of experts layer. Args: layer_input (Tensor): Input tensor. Returns: Tensor: Output tensor. Tensor: Router logits. """ bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) expert_inputs = layer_input[batch_index] hidden_states = self.input_linear(expert_inputs, expert_size) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] expert_outputs = self.output_linear(hidden_states, expert_size) expert_outputs = expert_outputs * batch_gates[:, None] zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) return layer_output, router_logits # Copied from transformers.models.granite.modeling_granite.repeat_kv with Granite->GraniteMoe def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) # copied from transformers.models.granite.modeling_granite.GraniteAttention with Granite->GraniteMoe # no longer copied after attention refactors class GraniteMoeAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: GraniteMoeConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.is_causal = True self.scaling = config.attention_multiplier if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, 
self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # NO LONGER EXIST Copied from transformers.models.granite.modeling_granite.GraniteFlashAttention2 with Granite->GraniteMoe # TODO cyril: modular class GraniteMoeFlashAttention2(GraniteMoeAttention): """ GraniteMoe flash attention module. This module inherits from `GraniteMoeAttention` as the weights of the module stay untouched. The only required change is in the forward pass, where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (GraniteMoeRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." 
) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, softmax_scale=self.scaling, sliding_window=getattr(self, "sliding_window", None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # NO LONGER EXIST Copied from transformers.models.granite.modeling_granite.GraniteSdpaAttention with Granite->GraniteMoe # TODO cyril: modular class GraniteMoeSdpaAttention(GraniteMoeAttention): """ GraniteMoe attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `GraniteMoeAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from GraniteMoeAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "GraniteMoeModel is using GraniteMoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. 
is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, scale=self.scaling, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value GRANITEMOE_ATTENTION_CLASSES = { "eager": GraniteMoeAttention, "flash_attention_2": GraniteMoeFlashAttention2, "sdpa": GraniteMoeSdpaAttention, } class GraniteMoeDecoderLayer(nn.Module): def __init__(self, config: GraniteMoeConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = GRANITEMOE_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.block_sparse_moe = GraniteMoeMoE(config) self.input_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.residual_multiplier = config.residual_multiplier def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, output_router_logits: Optional[bool] = False, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states * self.residual_multiplier # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states, router_logits = self.block_sparse_moe(hidden_states) hidden_states = residual + hidden_states * self.residual_multiplier outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) if output_router_logits: outputs += (router_logits,) return outputs GRANITEMOE_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`GraniteMoeConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare GraniteMoe Model outputting raw hidden-states without any specific head on top.", GRANITEMOE_START_DOCSTRING, ) class GraniteMoePreTrainedModel(PreTrainedModel): config_class = GraniteMoeConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["GraniteMoeDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, GraniteMoeParallelExperts): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) GRANITEMOE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. Two formats are allowed: - a [`~cache_utils.Cache`] instance; - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy cache format. The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the legacy cache format will be returned. If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. 
""" @add_start_docstrings( "The bare GraniteMoe Model outputting raw hidden-states without any specific head on top.", GRANITEMOE_START_DOCSTRING, ) class GraniteMoeModel(GraniteMoePreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`GraniteMoeDecoderLayer`] Args: config: GraniteMoeConfig """ def __init__(self, config: GraniteMoeConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [GraniteMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.embedding_multiplier = config.embedding_multiplier self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta # rope self.rotary_emb = GraniteMoeRotaryEmbedding(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(GRANITEMOE_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = inputs_embeds * self.embedding_multiplier return_legacy_cache = False if use_cache and not isinstance(past_key_values, Cache): # kept for BC (non `Cache` `past_key_values` inputs) return_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once( "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. 
" "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)" ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # embed positions hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_router_logits = () if output_router_logits else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, output_router_logits, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, output_router_logits=output_router_logits, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) if output_router_logits: all_router_logits += (layer_outputs[-1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_legacy_cache: next_cache = next_cache.to_legacy_cache() if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, router_logits=all_router_logits, ) def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114 if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output_attentions is True, the sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) if attention_mask is not None and attention_mask.dim() == 4: # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing causal_mask = attention_mask else: causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to place the 4D attention mask on.
cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask class GraniteMoeForCausalLM(GraniteMoePreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: GraniteMoeConfig): super().__init__(config) self.model = GraniteMoeModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.router_aux_loss_coef = config.router_aux_loss_coef self.num_experts = config.num_local_experts self.num_experts_per_tok = config.num_experts_per_tok # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(GRANITEMOE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Union[Tuple, MoeCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, GraniteMoeForCausalLM >>> model = GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b") >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b") >>> prompt = "Hey, are you conscious? Can you talk to me?" 
>>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits / self.config.logits_scaling loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() # Flatten the tokens loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) aux_loss = None if output_router_logits: aux_loss = load_balancing_loss_func( outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device if not return_dict: output = (logits,) + outputs[1:] if output_router_logits: output = (aux_loss,) + output return (loss,) + output if loss is not None else output return MoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits, ) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past __all__ = ["GraniteMoeForCausalLM", "GraniteMoeModel", "GraniteMoePreTrainedModel"]
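The sort-and-group routing performed by `GraniteMoeTopKGating` and consumed by `GraniteMoeMoE` above can be hard to follow from the flattened source. The snippet below is a minimal, self-contained sketch of the same pattern (it is not part of the modeling file): the variable names mirror the code above, but the toy sizes, the standalone `router`/`experts` tensors, and the final print are illustrative assumptions only.

# Illustrative sketch only -- not part of modeling_granitemoe.py.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
num_tokens, hidden, num_experts, top_k = 6, 4, 3, 2
hidden_states = torch.randn(num_tokens, hidden)
router = torch.nn.Linear(hidden, num_experts, bias=False)   # stand-in for GraniteMoeTopKGating.layer
experts = torch.randn(num_experts, hidden, hidden)          # one [out, in] weight matrix per expert

# 1) top-k routing decision per token
logits = router(hidden_states)                               # [num_tokens, num_experts]
top_k_logits, top_k_indices = logits.topk(top_k, dim=1)      # [num_tokens, top_k]
top_k_gates = torch.softmax(top_k_logits, dim=1)             # [num_tokens, top_k]

# 2) number of (token, expert) assignments each expert receives
expert_size = torch.zeros(num_tokens, num_experts).scatter(1, top_k_indices, 1).long().sum(0).tolist()

# 3) sort assignments by expert id so every expert sees one contiguous block
flat_experts = top_k_indices.flatten()                       # [num_tokens * top_k]
_, order = flat_experts.sort(0)
batch_index = order.div(top_k, rounding_mode="trunc")        # token id behind each sorted assignment
batch_gates = top_k_gates.flatten()[order]                   # gate value behind each sorted assignment

# 4) run each expert on its block, weight by the gate, scatter-add back per token
expert_inputs = hidden_states[batch_index]
outputs = []
for i, block in enumerate(expert_inputs.split(expert_size, dim=0)):
    outputs.append(F.linear(block, experts[i]))
expert_outputs = torch.cat(outputs, dim=0) * batch_gates[:, None]
layer_output = torch.zeros(num_tokens, hidden).index_add(0, batch_index, expert_outputs)
print(layer_output.shape)  # torch.Size([6, 4])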
transformers/src/transformers/models/granitemoe/modeling_granitemoe.py/0
{ "file_path": "transformers/src/transformers/models/granitemoe/modeling_granitemoe.py", "repo_id": "transformers", "token_count": 27807 }
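The next entry is the I-BERT modeling file, whose modules return a `(tensor, scaling_factor)` pair from every layer (see `QuantAct`, `QuantLinear`, `IntSoftmax` and friends imported from `quant_modules` below) so that non-linear operations can later be carried out with integer arithmetic. As rough orientation, the sketch below shows plain symmetric 8-bit quantization with an explicit scaling factor; it illustrates the general idea only and is not the actual `quant_modules.py` implementation, and the helper name is made up.

# Illustrative sketch only -- not part of modeling_ibert.py or quant_modules.py.
import torch

def symmetric_quantize(x: torch.Tensor, num_bits: int = 8):
    """Return a fake-quantized tensor together with the scaling factor used."""
    qmax = 2 ** (num_bits - 1) - 1                        # 127 for 8 bits
    scaling_factor = x.abs().max().clamp(min=1e-8) / qmax
    x_int = torch.round(x / scaling_factor).clamp(-qmax - 1, qmax)
    return x_int * scaling_factor, scaling_factor         # float tensor that equals int * scale

x = torch.randn(2, 4)
x_q, scale = symmetric_quantize(x)
# Integer-only kernels would operate on x_q / scale, which is integer-valued,
# while the scaling factor travels alongside the tensor through the network.
print(scale.item(), torch.allclose(x, x_q, atol=scale.item()))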
# coding=utf-8 # Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao, # Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team. # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch I-BERT model.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_ibert import IBertConfig from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "kssteven/ibert-roberta-base" _CONFIG_FOR_DOC = "IBertConfig" class IBertEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
""" def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.embedding_bit = 8 self.embedding_act_bit = 16 self.act_bit = 8 self.ln_input_bit = 22 self.ln_output_bit = 32 self.word_embeddings = QuantEmbedding( config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id, weight_bit=self.embedding_bit, quant_mode=self.quant_mode, ) self.token_type_embeddings = QuantEmbedding( config.type_vocab_size, config.hidden_size, weight_bit=self.embedding_bit, quant_mode=self.quant_mode ) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") # End copy self.padding_idx = config.pad_token_id self.position_embeddings = QuantEmbedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx, weight_bit=self.embedding_bit, quant_mode=self.quant_mode, ) # Integer-only addition between embeddings self.embeddings_act1 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode) self.embeddings_act2 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = IntLayerNorm( config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant, ) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. 
position_ids = create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds, inputs_embeds_scaling_factor = self.word_embeddings(input_ids) else: inputs_embeds_scaling_factor = None token_type_embeddings, token_type_embeddings_scaling_factor = self.token_type_embeddings(token_type_ids) embeddings, embeddings_scaling_factor = self.embeddings_act1( inputs_embeds, inputs_embeds_scaling_factor, identity=token_type_embeddings, identity_scaling_factor=token_type_embeddings_scaling_factor, ) if self.position_embedding_type == "absolute": position_embeddings, position_embeddings_scaling_factor = self.position_embeddings(position_ids) embeddings, embeddings_scaling_factor = self.embeddings_act1( embeddings, embeddings_scaling_factor, identity=position_embeddings, identity_scaling_factor=position_embeddings_scaling_factor, ) embeddings, embeddings_scaling_factor = self.LayerNorm(embeddings, embeddings_scaling_factor) embeddings = self.dropout(embeddings) embeddings, embeddings_scaling_factor = self.output_activation(embeddings, embeddings_scaling_factor) return embeddings, embeddings_scaling_factor def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class IBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.quant_mode = config.quant_mode self.weight_bit = 8 self.bias_bit = 32 self.act_bit = 8 self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size # Q, K, V Linear layers self.query = QuantLinear( config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True, ) self.key = QuantLinear( config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True, ) self.value = QuantLinear( config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True, ) # Requantization (32bit -> 8bit) for Q, K, V activations self.query_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.key_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.value_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) 
self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type != "absolute": raise ValueError("I-BERT only supports 'absolute' for `config.position_embedding_type`") self.softmax = IntSoftmax(self.act_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False, ): # Projection mixed_query_layer, mixed_query_layer_scaling_factor = self.query(hidden_states, hidden_states_scaling_factor) mixed_key_layer, mixed_key_layer_scaling_factor = self.key(hidden_states, hidden_states_scaling_factor) mixed_value_layer, mixed_value_layer_scaling_factor = self.value(hidden_states, hidden_states_scaling_factor) # Requantization query_layer, query_layer_scaling_factor = self.query_activation( mixed_query_layer, mixed_query_layer_scaling_factor ) key_layer, key_layer_scaling_factor = self.key_activation(mixed_key_layer, mixed_key_layer_scaling_factor) value_layer, value_layer_scaling_factor = self.value_activation( mixed_value_layer, mixed_value_layer_scaling_factor ) # Transpose query_layer = self.transpose_for_scores(query_layer) key_layer = self.transpose_for_scores(key_layer) value_layer = self.transpose_for_scores(value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) scale = math.sqrt(self.attention_head_size) attention_scores = attention_scores / scale if self.quant_mode: attention_scores_scaling_factor = query_layer_scaling_factor * key_layer_scaling_factor / scale else: attention_scores_scaling_factor = None if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in IBertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs, attention_probs_scaling_factor = self.softmax( attention_scores, attention_scores_scaling_factor ) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) if attention_probs_scaling_factor is not None: context_layer_scaling_factor = attention_probs_scaling_factor * value_layer_scaling_factor else: context_layer_scaling_factor = None context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) # requantization: 32-bit -> 8-bit context_layer, context_layer_scaling_factor = self.output_activation( context_layer, context_layer_scaling_factor ) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) output_scaling_factor = ( (context_layer_scaling_factor, attention_probs_scaling_factor) if output_attentions else (context_layer_scaling_factor,) ) return outputs, output_scaling_factor class IBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 self.weight_bit = 8 self.bias_bit = 32 self.ln_input_bit = 22 self.ln_output_bit = 32 self.dense = QuantLinear( config.hidden_size, config.hidden_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True, ) self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode) self.LayerNorm = IntLayerNorm( config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant, ) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor): hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor) hidden_states = self.dropout(hidden_states) hidden_states, hidden_states_scaling_factor = self.ln_input_act( hidden_states, hidden_states_scaling_factor, identity=input_tensor, identity_scaling_factor=input_tensor_scaling_factor, ) hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor) hidden_states, hidden_states_scaling_factor = self.output_activation( hidden_states, hidden_states_scaling_factor ) return hidden_states, hidden_states_scaling_factor class IBertAttention(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.self = IBertSelfAttention(config) self.output = IBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, 
output_attentions=False, ): self_outputs, self_outputs_scaling_factor = self.self( hidden_states, hidden_states_scaling_factor, attention_mask, head_mask, output_attentions, ) attention_output, attention_output_scaling_factor = self.output( self_outputs[0], self_outputs_scaling_factor[0], hidden_states, hidden_states_scaling_factor ) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them outputs_scaling_factor = (attention_output_scaling_factor,) + self_outputs_scaling_factor[1:] return outputs, outputs_scaling_factor class IBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 self.weight_bit = 8 self.bias_bit = 32 self.dense = QuantLinear( config.hidden_size, config.intermediate_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True, ) if config.hidden_act != "gelu": raise ValueError("I-BERT only supports 'gelu' for `config.hidden_act`") self.intermediate_act_fn = IntGELU(quant_mode=self.quant_mode, force_dequant=config.force_dequant) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) def forward(self, hidden_states, hidden_states_scaling_factor): hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor) hidden_states, hidden_states_scaling_factor = self.intermediate_act_fn( hidden_states, hidden_states_scaling_factor ) # Requantization: 32bit -> 8-bit hidden_states, hidden_states_scaling_factor = self.output_activation( hidden_states, hidden_states_scaling_factor ) return hidden_states, hidden_states_scaling_factor class IBertOutput(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 self.weight_bit = 8 self.bias_bit = 32 self.ln_input_bit = 22 self.ln_output_bit = 32 self.dense = QuantLinear( config.intermediate_size, config.hidden_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True, ) self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode) self.LayerNorm = IntLayerNorm( config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant, ) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor): hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor) hidden_states = self.dropout(hidden_states) hidden_states, hidden_states_scaling_factor = self.ln_input_act( hidden_states, hidden_states_scaling_factor, identity=input_tensor, identity_scaling_factor=input_tensor_scaling_factor, ) hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor) hidden_states, hidden_states_scaling_factor = self.output_activation( hidden_states, hidden_states_scaling_factor ) return hidden_states, hidden_states_scaling_factor class IBertLayer(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 self.seq_len_dim = 1 self.attention = IBertAttention(config) self.intermediate = IBertIntermediate(config) self.output = IBertOutput(config) self.pre_intermediate_act = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.pre_output_act = 
QuantAct(self.act_bit, quant_mode=self.quant_mode) def forward( self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False, ): self_attention_outputs, self_attention_outputs_scaling_factor = self.attention( hidden_states, hidden_states_scaling_factor, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] attention_output_scaling_factor = self_attention_outputs_scaling_factor[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output, layer_output_scaling_factor = self.feed_forward_chunk( attention_output, attention_output_scaling_factor ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output, attention_output_scaling_factor): attention_output, attention_output_scaling_factor = self.pre_intermediate_act( attention_output, attention_output_scaling_factor ) intermediate_output, intermediate_output_scaling_factor = self.intermediate( attention_output, attention_output_scaling_factor ) intermediate_output, intermediate_output_scaling_factor = self.pre_output_act( intermediate_output, intermediate_output_scaling_factor ) layer_output, layer_output_scaling_factor = self.output( intermediate_output, intermediate_output_scaling_factor, attention_output, attention_output_scaling_factor ) return layer_output, layer_output_scaling_factor class IBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.quant_mode = config.quant_mode self.layer = nn.ModuleList([IBertLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = None # `config.add_cross_attention` is not supported next_decoder_cache = None # `config.use_cache` is not supported for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, hidden_states_scaling_factor, attention_mask, layer_head_mask, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class IBertPooler(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class IBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = IBertConfig base_model_prefix = "ibert" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (QuantLinear, nn.Linear)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (QuantEmbedding, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (IntLayerNorm, nn.LayerNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) def resize_token_embeddings(self, new_num_tokens=None): raise NotImplementedError("`resize_token_embeddings` is not supported for I-BERT.") IBERT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`IBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ IBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top.", IBERT_START_DOCSTRING, ) class IBertModel(IBertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.quant_mode = config.quant_mode self.embeddings = IBertEmbeddings(config) self.encoder = IBertEncoder(config) self.pooler = IBertPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
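        # For the usual 2D mask this produces a tensor of shape (batch_size, 1, 1, seq_length) in which
        # kept positions are 0.0 and masked positions a large negative value, so they vanish after softmax.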
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output, embedding_output_scaling_factor = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, embedding_output_scaling_factor, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""I-BERT Model with a `language modeling` head on top.""", IBERT_START_DOCSTRING) class IBertForMaskedLM(IBertPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.bias", "lm_head.decoder.weight"] def __init__(self, config): super().__init__(config) self.ibert = IBertModel(config, add_pooling_layer=False) self.lm_head = IBertLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Used to hide legacy arguments that have been deprecated. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class IBertLMHead(nn.Module): """I-BERT Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x def _tie_weights(self) -> None: # For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( """ I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, IBERT_START_DOCSTRING, ) class IBertForSequenceClassification(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ibert = IBertModel(config, add_pooling_layer=False) self.classifier = IBertClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, IBERT_START_DOCSTRING, ) class IBertForMultipleChoice(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.ibert = IBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.ibert( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, IBERT_START_DOCSTRING, ) class IBertForTokenClassification(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ibert = IBertModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class IBertClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): hidden_states = features[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states @add_start_docstrings( """ I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, IBERT_START_DOCSTRING, ) class IBertForQuestionAnswering(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ibert = IBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's *utils.make_positions*. Args: input_ids (`torch.LongTensor`): Indices of input sequence tokens in the vocabulary. Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx __all__ = [ "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ]
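# Illustrative sketch (hypothetical values): how `create_position_ids_from_input_ids` numbers
# non-padding tokens consecutively starting at padding_idx + 1 while padding keeps padding_idx.
#
#     input_ids = torch.tensor([[5, 7, 9, 1, 1]])       # padding_idx assumed to be 1
#     create_position_ids_from_input_ids(input_ids, 1)  # -> tensor([[2, 3, 4, 1, 1]])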
transformers/src/transformers/models/ibert/modeling_ibert.py/0
{ "file_path": "transformers/src/transformers/models/ibert/modeling_ibert.py", "repo_id": "transformers", "token_count": 24601 }
# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Idefics2 model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, ModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from ...utils.deprecation import deprecate_kwarg from ..auto import AutoModel from .configuration_idefics2 import Idefics2Config, Idefics2PerceiverConfig, Idefics2VisionConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "Idefics2Config" @dataclass class Idefics2BaseModelOutputWithPast(ModelOutput): """ Base class for Idefics2 model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Idefics2 class Idefics2CausalLMOutputWithPast(ModelOutput): """ Base class for Idefics2 causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. 
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class Idefics2VisionEmbeddings(nn.Module): """ This is a modified version of `siglip.modelign_siglip.SiglipVisionEmbeddings` to enable images of variable resolution. The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304) which allows treating images in their native aspect ratio and without the need to resize them to the same fixed size. In particular, we start from the original pre-trained SigLIP model (which uses images of fixed-size square images) and adapt it by training on images of variable resolutions. """ def __init__(self, config: Idefics2VisionConfig): super().__init__() self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding="valid", ) self.num_patches_per_side = self.image_size // self.patch_size self.num_patches = self.num_patches_per_side**2 self.num_positions = self.num_patches self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor: batch_size, _, max_im_h, max_im_w = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) embeddings = patch_embeds.flatten(2).transpose(1, 2) max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side) position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0) for batch_idx, p_attn_mask in enumerate(patch_attention_mask): nb_patches_h = p_attn_mask[:, 0].sum() nb_patches_w = p_attn_mask[0].sum() fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h) fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w) bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True) bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True) pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten() position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids position_ids = position_ids.to(self.position_embedding.weight.device) embeddings = embeddings + self.position_embedding(position_ids) return embeddings # Copied from transformers.models.siglip.modeling_siglip.SiglipAttention with Siglip->Idefics2Vision class Idefics2VisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__ def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) # Ignore copy self.is_causal = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) k_v_seq_len = key_states.shape[-2] attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len): raise ValueError( f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class Idefics2VisionFlashAttention2(Idefics2VisionAttention): """ Idefics2Vision flash attention module. This module inherits from `Idefics2VisionAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
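        # The resulting flag is True only for flash_attn < 2.1; it is later forwarded to
        # `_flash_attention_forward` as `use_top_left_mask` so the helper can compensate for the
        # older top-left causal-mask convention.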
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x num_heads x head_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference, so it is recommended not to cast the LayerNorms
        # in fp32. (Idefics2VisionRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights IDEFICS_VISION_ATTENTION_CLASSES = { "eager": Idefics2VisionAttention, "flash_attention_2": Idefics2VisionFlashAttention2, } # Copied from transformers.models.siglip.modeling_siglip.SiglipMLP with Siglip->Idefics2Vision class Idefics2VisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class Idefics2MLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, output_size: int, hidden_act: str, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, output_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) # Copied from transformers.models.siglip.modeling_siglip.SiglipMultiheadAttentionPoolingHead with Siglip->Idefics2 class Idefics2MultiheadAttentionPoolingHead(nn.Module): """Multihead Attention Pooling.""" def __init__(self, config: Idefics2VisionConfig): super().__init__() self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Ignore copy self.mlp = Idefics2MLP( hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, output_size=config.hidden_size, ) def forward(self, hidden_state): batch_size = hidden_state.shape[0] probe = self.probe.repeat(batch_size, 1, 1) hidden_state = self.attention(probe, hidden_state, hidden_state)[0] residual = hidden_state hidden_state = self.layernorm(hidden_state) hidden_state = residual + self.mlp(hidden_state) return hidden_state[:, 0] class Idefics2EncoderLayer(nn.Module): def __init__(self, config: Idefics2VisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = IDEFICS_VISION_ATTENTION_CLASSES[config._attn_implementation](config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Idefics2VisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(batch, seq_len, embed_dim)`. 
attention_mask (`torch.FloatTensor`): Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoder with Siglip->Idefics2 class Idefics2Encoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`Idefics2EncoderLayer`]. Args: config: Idefics2Config """ def __init__(self, config: Idefics2Config): super().__init__() self.config = config self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False # Ignore copy def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) IDEFICS2_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Idefics2Config`] or [`Idefics2VisionConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare Idefics2 Model outputting raw hidden-states without any specific head on top.", IDEFICS2_START_DOCSTRING, ) class Idefics2PreTrainedModel(PreTrainedModel): config_class = Idefics2Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Idefics2VisionAttention", "Idefics2MLP", "Idefics2PerceiverLayer", "Idefics2DecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True def _init_weights(self, module): std = ( self.config.text_config.initializer_range if hasattr(self.config, "initializer_range") else self.config.text_config.initializer_range ) if hasattr(module, "class_embedding"): module.class_embedding.data.normal_(mean=0.0, std=std) if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() IDEFICS2_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)): The tensors corresponding to the input images. Pixel values can be obtained using [`AutoImageProcessor`]. 
            See [`CLIPImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
            [`CLIPImageProcessor`] for processing images).
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """Idefics2 vision encoder model that returns raw image embeddings.""",
    IDEFICS2_START_DOCSTRING,
)
class Idefics2VisionTransformer(Idefics2PreTrainedModel):
    _supports_sdpa = False
    config_class = Idefics2VisionConfig

    def __init__(self, config: Idefics2VisionConfig):
        super().__init__(config)
        embed_dim = config.hidden_size

        self.config = config
        self.embeddings = Idefics2VisionEmbeddings(config)
        self.encoder = Idefics2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        pixel_values,
        patch_attention_mask: Optional[torch.BoolTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size = pixel_values.size(0)
        if patch_attention_mask is None:
            patch_size = self.config.patch_size
            patch_attention_mask = torch.ones(
                (
                    batch_size,
                    pixel_values.size(2) // patch_size,
                    pixel_values.size(3) // patch_size,
                )
            )
            patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)

        hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)

        patch_attention_mask = patch_attention_mask.view(batch_size, -1)
        # The call to `_upad_input` in `_flash_attention_forward` is expensive
        # So when the `patch_attention_mask` is full of 1s (i.e.
attending to the whole sequence), # avoiding passing the attention_mask, which is equivalent to attending to the full sequence if not torch.any(~patch_attention_mask): patch_attention_mask = None elif not self._use_flash_attention_2: patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=patch_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) if not return_dict: return (last_hidden_state,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Idefics2 class Idefics2RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Idefics2RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class Idefics2PerceiverAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int] = None) -> None: """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`""" super().__init__() self.layer_idx = None self.hidden_size = config.hidden_size self.num_heads = config.resampler_n_heads self.head_dim = config.resampler_head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.attention_dropout = config.attention_dropout self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.is_causal = False def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """ Runs Perceiver Self-Attention, with special (context, 
latents) appended along the `seq` dimension! Args: latents (`torch.Tensor`): Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to. context (`torch.Tensor`): Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample. attention_mask (`torch.Tensor`, *optional*): Tensor of shape [bsz, 1, seq, n_latents] representing attention mask. position_ids (`torch.LongTensor`, *optional*): Tensor of shape [bsz, seq] representing position indices of each input token. past_key_value (`Tuple[torch.Tensor]`, *optional*): Tuple of tensors containing cached key and value states. output_attentions (`bool`, *optional*, defaults to `False`): Whether to return attention weights. use_cache (`bool`, *optional*, defaults to `False`): Whether to use past_key_value for caching. """ bsz, q_len, _ = latents.size() kv_seq_len = q_len + context.size()[1] hidden_states = torch.concat([context, latents], dim=-2) query_states = self.q_proj(latents) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) past_key_value = getattr(self, "past_key_value", past_key_value) if past_key_value is not None: key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # NO LONGER EXIST Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with MistralAttention->Idefics2PerceiverAttention,MistralFlashAttention->Idefics2PerceiverFlashAttention,Mistral->Idefics2 # TODO cyril: modular class Idefics2PerceiverFlashAttention2(Idefics2PerceiverAttention): """ Idefics2 flash attention module. This module inherits from `Idefics2PerceiverAttention` as the weights of the module stays untouched. 
The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() # Ignore copy def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = latents.size() kv_seq_len = q_len + context.size()[1] # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn! # Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents` query_states = self.q_proj(latents) key_states = self.k_proj(torch.cat([context, latents], dim=-2)) value_states = self.v_proj(torch.cat([context, latents], dim=-2)) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute if hasattr(self.config, "sliding_window") and kv_seq_len > self.config.sliding_window: slicing_tokens = kv_seq_len - self.config.sliding_window past_key = past_key_value[0] past_value = past_key_value[1] past_key = past_key[:, :, slicing_tokens:, :].contiguous() past_value = past_value[:, :, slicing_tokens:, :].contiguous() if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError( "past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1," f" head_dim`), got {past_key.shape}" ) past_key_value = (past_key, past_value) if attention_mask is not None: attention_mask = attention_mask[:, slicing_tokens:] attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. 
Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) # Reashape to the expected shape for Flash Attention key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, sliding_window=None, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value IDEFICS2_PERCEIVER_ATTENTION_CLASSES = { "eager": Idefics2PerceiverAttention, "flash_attention_2": Idefics2PerceiverFlashAttention2, } class Idefics2PerceiverLayer(nn.Module): def __init__(self, config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.n_latents = config.resampler_n_latents self.depth = config.resampler_depth self.rms_norm_eps = config.rms_norm_eps self.input_latents_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self.input_context_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self.self_attn = IDEFICS2_PERCEIVER_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) self.post_attention_layernorm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self.mlp = Idefics2MLP( hidden_size=config.hidden_size, intermediate_size=config.hidden_size * 4, output_size=config.hidden_size, hidden_act=config.hidden_act, ) def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = latents latents = self.input_latents_norm(latents) context = self.input_context_norm(context) latents, self_attn_weights, present_key_value = self.self_attn( latents=latents, context=context, attention_mask=attention_mask, ) latents = residual + latents residual = latents latents = self.post_attention_layernorm(latents) latents = self.mlp(latents) latents = residual + latents outputs = (latents,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs IDEFICS2_INPUTS_DOCSTRING = r""" Args: context (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`): The hidden states of the image after vision encoder and modality projection. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) """ @add_start_docstrings( "Idefics2 perceiver resampler model that performs `depth` blocks of cross-attention with a fixed ", "`n_latents` inputs to decrease embedding sequence length. The Resampler acts as a form of learned pooling and ", "is derived from [Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206)", IDEFICS2_START_DOCSTRING, ) class Idefics2PerceiverResampler(Idefics2PreTrainedModel): _supports_sdpa = False config_class = Idefics2PerceiverConfig def __init__(self, config) -> None: super().__init__(config) self.hidden_size = config.hidden_size self.hidden_act = config.hidden_act self.n_latents = config.resampler_n_latents self.depth = config.resampler_depth self.rms_norm_eps = config.rms_norm_eps # Create Latents for Perceiver self.latents = nn.Parameter(torch.ones(self.n_latents, self.hidden_size)) # Create Transformer Blocks self.layers = nn.ModuleList([Idefics2PerceiverLayer(config, idx) for idx in range(self.depth)]) self.norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" def forward( self, context: torch.Tensor, attention_mask: torch.Tensor, ) -> torch.Tensor: # seq embed -> bsz seq embed latents = self.latents.unsqueeze(0).expand((context.shape[0], *self.latents.size())) latent_attention_mask = torch.ones( (attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device ) attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1) attention_mask = ( _prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents) if not self._use_flash_attention_2 else attention_mask ) compressed_context = latents for perceiver_layer in self.layers: layer_outputs = perceiver_layer( compressed_context, context, attention_mask=attention_mask, position_ids=None, past_key_value=None, output_attentions=False, use_cache=False, ) compressed_context = layer_outputs[0] compressed_context = self.norm(compressed_context) return compressed_context class Idefics2Connector(nn.Module): def __init__(self, config): super().__init__() self.modality_projection = Idefics2MLP( hidden_size=config.vision_config.hidden_size, intermediate_size=config.text_config.intermediate_size, output_size=config.text_config.hidden_size, hidden_act=config.text_config.hidden_act, ) self.perceiver_resampler = 
Idefics2PerceiverResampler._from_config(config.perceiver_config) def forward(self, image_hidden_states, attention_mask): image_hidden_states = self.modality_projection(image_hidden_states) image_hidden_states = self.perceiver_resampler(context=image_hidden_states, attention_mask=attention_mask) return image_hidden_states IDEFICS2_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)): The tensors corresponding to the input images. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([]`LlavaProcessor`] uses [`CLIPImageProcessor`] for processing images). 
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*): Mask to avoid performing attention on padding pixel indices. image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The hidden states of the image encoder after modality projection and perceiver resampling. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """Idefics2 model consisting of a SIGLIP vision encoder and Mistral language decoder""", IDEFICS2_START_DOCSTRING, ) class Idefics2Model(Idefics2PreTrainedModel): def __init__(self, config: Idefics2Config): super().__init__(config) self.padding_idx = self.config.text_config.pad_token_id self.vocab_size = self.config.text_config.vocab_size self.vision_model = Idefics2VisionTransformer._from_config(config.vision_config) self.connector = Idefics2Connector(config) self.text_model = AutoModel.from_config(config.text_config) self.image_seq_len = config.perceiver_config.resampler_n_latents self.image_token_id = self.config.image_token_id self._use_flash_attention_2 = config.text_config._attn_implementation == "flash_attention_2" self.post_init() def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for lora when using gradient checkpointing. c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032 Override to set output.requires_grad = True for both the decoder's and vision model's embeddings. """ def get_lowest_module(module): if len(list(module.children())) == 0: # If the module has no children, it is a leaf module (e.g., Linear, Conv2d, etc.) return module else: # Recursively call the function on each child module return get_lowest_module(list(module.children())[0]) def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook( make_inputs_require_grads ) def disable_input_require_grads(self): self._text_require_grads_hook.remove() self._vision_require_grads_hook.remove() def get_input_embeddings(self): return self.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) def inputs_merger( self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.Tensor], image_hidden_states: Optional[torch.Tensor], ): """ This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM. The merging happens as follows: - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`. 
- We get the image hidden states for the image through the vision encoder (and potentially the perceiver), and that hidden state is then projected into the text embedding space. We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer. - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_toke_around_image vector_tok_4`. That sequence is fed to the LM. - To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states. """ num_images, _, vision_hidden_size = image_hidden_states.shape special_image_token_mask = input_ids == self.image_token_id new_inputs_embeds = inputs_embeds.clone() reshaped_image_hidden_states = image_hidden_states.view(-1, vision_hidden_size) new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states return new_inputs_embeds @add_start_docstrings_to_model_forward( """ Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where max_num_images is the maximum number of images among the batch_size samples in the batch. Padding images are not needed beyond padding the pixel_values at the entrance of the model. For efficiency, we only pass through the vision_model's forward the real images by discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3. """, IDEFICS2_INPUTS_DOCSTRING, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_attention_mask: Optional[torch.BoolTensor] = None, image_hidden_states: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Idefics2BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.training and self.text_model.gradient_checkpointing and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # retrieve input_ids and inputs_embeds if input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") past_seen_tokens = 0 # kept for BC (non `Cache` `past_key_values` inputs) return_legacy_cache = False if use_cache: if not isinstance(past_key_values, Cache): return_legacy_cache = True if past_key_values is None: past_key_values = DynamicCache() else: past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once( "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and " "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class " "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)" ) past_seen_tokens = past_key_values.get_seq_length() if inputs_embeds is not None and input_ids is None and past_seen_tokens == 0: raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.") if inputs_embeds is None: inputs_embeds = self.text_model.get_input_embeddings()(input_ids) # START VISUAL INPUTS INTEGRATION if pixel_values is not None and image_hidden_states is not None: raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time") elif pixel_values is not None: batch_size, num_images, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:]) # Remove padding images - padding images are full 0. nb_values_per_image = pixel_values.shape[1:].numel() real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image pixel_values = pixel_values[real_images_inds].contiguous() # Handle the vision attention mask if pixel_attention_mask is None: pixel_attention_mask = torch.ones( size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device, ) else: # Remove padding images from the mask pixel_attention_mask = pixel_attention_mask.view( batch_size * num_images, *pixel_attention_mask.shape[2:] ) pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous() patch_size = self.config.vision_config.patch_size patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size) patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size) patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) == patch_size * patch_size).bool() # Get sequence from the vision encoder image_hidden_states = self.vision_model( pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, ).last_hidden_state # Modality projection & resampling image_hidden_states = self.connector( image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1) ) elif image_hidden_states is not None: image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device) if past_seen_tokens == 0 and inputs_embeds is not None and image_hidden_states is not None: # When we generate, we don't want to replace the potential image_token_id that we generated by images # that simply don't exist inputs_embeds = self.inputs_merger( input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states, ) outputs = self.text_model(
inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if return_legacy_cache and use_cache: outputs.past_key_values = outputs.past_key_values.to_legacy_cache() if not return_dict: return tuple(v for v in [*outputs, image_hidden_states] if v is not None) return Idefics2BaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_hidden_states, ) @add_start_docstrings( """The Idefics2 Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top. """, IDEFICS2_START_DOCSTRING, ) class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = Idefics2Model(config) self.image_token_id = self.config.image_token_id self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.vocab_size = config.text_config.vocab_size # Initialize weights and apply final processing self.post_init() def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook( make_inputs_require_grads ) def disable_input_require_grads(self): self._text_require_grads_hook.remove() self._vision_require_grads_hook.remove() def get_input_embeddings(self): return self.model.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.model.text_model.set_input_embeddings(value) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") @add_start_docstrings_to_model_forward(IDEFICS2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Idefics2CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_attention_mask: Optional[torch.BoolTensor] = None, image_hidden_states: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, ) -> Union[Tuple, Idefics2CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics2ForConditionalGeneration`). 
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. logits_to_keep (`int` or `torch.Tensor`, *optional*): If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension. This is useful when using packed tensor format (single dimension for batch and sequence length). Returns: Example: ```python >>> import requests >>> import torch >>> from PIL import Image >>> from io import BytesIO >>> from transformers import AutoProcessor, AutoModelForVision2Seq >>> from transformers.image_utils import load_image >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg") >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg") >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg") >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base") >>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b-base", device_map="auto") >>> BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids >>> EOS_WORDS_IDS = [processor.tokenizer.eos_token_id] >>> # Create inputs >>> prompts = [ ... "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,", ... "In which city is that bridge located?<image>", ... ] >>> images = [[image1, image2], [image3]] >>> inputs = processor(images=images, text=prompts, padding=True, return_tensors="pt").to("cuda") >>> # Generate >>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20) >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True) >>> print(generated_texts) ['In this image, we can see the city of New York, and more specifically the Statue of Liberty. 
In this image, we can see the city of New York, and more specifically the Statue of Liberty.\n\n', 'In which city is that bridge located?\n\nThe bridge is located in the city of Pittsburgh, Pennsylvania.\n\n\nThe bridge is'] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() labels = labels.to(logits.device) # Shift so that tokens < n predict n if attention_mask is not None: # we use the input attention mask to shift the logits and labels, because it is 2D. # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device) shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous() shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Idefics2CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, pixel_values=None, pixel_attention_mask=None, image_hidden_states=None, logits_to_keep=None, **kwargs, ): # Overwritten -- there are mutually exclusive inputs (if the logic to make `image_hidden_states` take # precedence is moved to the model, we can remove this fn) # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens if past_key_values is not None: if inputs_embeds is not None: # Exception 1 input_ids = input_ids[:, -cache_position.shape[0] :] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = 
position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step # but IDEFICS requires both ids and embeds to be present if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": input_ids} else: # The clone here is for the same reason as for `position_ids`. model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None} if logits_to_keep is not None: model_inputs["logits_to_keep"] = logits_to_keep if image_hidden_states is not None: pixel_values = None pixel_attention_mask = None else: pixel_values = pixel_values pixel_attention_mask = pixel_attention_mask model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "pixel_values": pixel_values, "pixel_attention_mask": pixel_attention_mask, "image_hidden_states": image_hidden_states, } ) return model_inputs def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs): model_kwargs = super()._update_model_kwargs_for_generation( outputs=outputs, model_kwargs=model_kwargs, is_encoder_decoder=is_encoder_decoder, **kwargs, ) # Get the precomputed image_hidden_states model_kwargs["image_hidden_states"] = outputs.image_hidden_states return model_kwargs @staticmethod # Copied from transformers.models.opt.modeling_opt.OPTForCausalLM._reorder_cache def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past __all__ = ["Idefics2ForConditionalGeneration", "Idefics2PreTrainedModel", "Idefics2Model"]
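# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal, hedged check of the `repeat_kv` helper defined above: per its
# docstring it should match `torch.repeat_interleave(x, dim=1, repeats=n_rep)`.
# All shapes below are invented purely for illustration.
if __name__ == "__main__":
    import torch

    batch, num_kv_heads, seq_len, head_dim, n_rep = 2, 4, 5, 8, 3
    x = torch.randn(batch, num_kv_heads, seq_len, head_dim)

    expanded = repeat_kv(x, n_rep)
    reference = torch.repeat_interleave(x, repeats=n_rep, dim=1)

    # Both paths should yield (batch, num_kv_heads * n_rep, seq_len, head_dim)
    assert expanded.shape == (batch, num_kv_heads * n_rep, seq_len, head_dim)
    assert torch.equal(expanded, reference)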
transformers/src/transformers/models/idefics2/modeling_idefics2.py/0
{ "file_path": "transformers/src/transformers/models/idefics2/modeling_idefics2.py", "repo_id": "transformers", "token_count": 34956 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch.nn import CrossEntropyLoss from transformers.models.instructblip.configuration_instructblip import ( InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from transformers.models.instructblip.modeling_instructblip import ( InstructBlipForConditionalGeneration, InstructBlipForConditionalGenerationModelOutput, ) from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class InstructBlipVideoVisionConfig(InstructBlipVisionConfig): pass class InstructBlipVideoQFormerConfig(InstructBlipQFormerConfig): pass class InstructBlipVideoConfig(PretrainedConfig): r""" [`InstructBlipVideoConfig`] is the configuration class to store the configuration of a [`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate a Instructblipvideo model according to the specified arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Instructblipvideo [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`]. qformer_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`]. text_config (`dict`, *optional*): Dictionary of configuration options used to initialize any [`PretrainedConfig`]. num_query_tokens (`int`, *optional*, defaults to 32): The number of query tokens passed through the Transformer. video_token_index (`int`, *optional*): Token index of special video token. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ( ... InstructBlipVideoVisionConfig, ... InstructBlipVideoQFormerConfig, ... OPTConfig, ... InstructBlipVideoConfig, ... InstructBlipVideoForConditionalGeneration, ... 
) >>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration >>> configuration = InstructBlipVideoConfig() >>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration >>> model = InstructBlipVideoForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig >>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations >>> vision_config = InstructBlipVideoVisionConfig() >>> qformer_config = InstructBlipVideoQFormerConfig() >>> text_config = OPTConfig() >>> config = InstructBlipVideoConfig.from_text_vision_configs(vision_config, qformer_config, text_config) ```""" model_type = "instructblipvideo" sub_configs = { "text_config": AutoConfig, "qformer_config": InstructBlipVideoQFormerConfig, "vision_config": InstructBlipVideoVisionConfig, } def __init__( self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, video_token_index=None, **kwargs, ): super().__init__(**kwargs) if vision_config is None: vision_config = {} logger.info("vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.") if qformer_config is None: qformer_config = {} logger.info("qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.") if text_config is None: text_config = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).") self.vision_config = InstructBlipVideoVisionConfig(**vision_config) self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config) text_model_type = text_config["model_type"] if "model_type" in text_config else "opt" self.text_config = CONFIG_MAPPING[text_model_type](**text_config) self.num_query_tokens = num_query_tokens self.video_token_index = video_token_index self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES self.initializer_factor = 1.0 self.initializer_range = 0.02 @classmethod def from_vision_qformer_text_configs( cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs, ): r""" Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from a InstructBlipVideo vision model, Q-Former and language model configurations. 
Returns: [`InstructBlipVideoConfig`]: An instance of a configuration object """ return cls( vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, ) @dataclass class InstructBlipVideoForConditionalGenerationModelOutput(InstructBlipForConditionalGenerationModelOutput): pass class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration): def forward( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Union[Tuple, InstructBlipVideoForConditionalGenerationModelOutput]: r""" ```python >>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration >>> import torch >>> from huggingface_hub import hf_hub_download >>> import av >>> import numpy as np >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`List[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto") >>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b") >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample uniformly 4 frames from the video >>> total_frames = container.streams.video[0].frames >>> indices = np.arange(0, total_frames, total_frames / 4).astype(int) >>> clip = read_video_pyav(container, indices) >>> prompt = "What is happening in the video?" >>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device) >>> outputs = model.generate( ... **inputs, ... do_sample=False, ... num_beams=5, ... max_length=256, ... repetition_penalty=1.5, ... length_penalty=1.0, ... ) >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() >>> print(generated_text) "A person is eating a bowl of pasta, and they are using a fork to eat it.
The person is sitting at a table, and the plate of pasta is on the table in front" ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the images through the vision encoder, # we process in a batched way, later unbatch it back (video has frames=4 always) batch_size, frames, channel, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer( input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) query_output = query_outputs[0][:, : query_tokens.size(1), :] # step 3: use the language model, conditioned on the query outputs and the prompt language_model_inputs = self.language_projection(query_output) # unbatch inputs back, each video-frame gets `num_query_tokens` seq length language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) language_model_attention_mask = torch.ones( language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device ) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) # if the model already has "video_token_index" then the input is expanded to account for image embeds # otherwise we expand manually by concatenating if getattr(self.config, "video_token_index", None) is not None: special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once( "Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. " "Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. " "Using processors without these attributes in the config is deprecated and will throw an error in v4.47." 
) inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat( [language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1 ) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None # we compute the loss here since we need to take into account the sequence length of the query embeds if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1) :, :] # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) # Flatten the tokens loss_fct = CrossEntropyLoss(reduction="mean") loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, ) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return ((loss,) + output) if loss is not None else output return InstructBlipVideoForConditionalGenerationModelOutput( loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) @torch.no_grad() def generate( self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor] = None, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: bool = False, **generate_kwargs, ) -> torch.LongTensor: r""" Overrides `generate` function to be able to use the model as a conditional generator. Args: pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or (batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed. qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt to be fed to the Q-Former module. qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt for the generation. attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. interpolate_pos_encoding (`bool`, *optional*, defaults to `False`): Whether to interpolate the positional encoding of the image embeddings. Returns: captions (list): A list of strings of length batch_size * num_captions. 
""" if hasattr(self, "hf_device_map"): # preprocess for `accelerate` self._preprocess_accelerate() # we process in a batched way, later unbatch it back (video has frames=4) batch_size, frames, channel, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width) image_embeds = self.vision_model( pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding, ).last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer( input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True, ) query_output = query_outputs.last_hidden_state[:, : query_tokens.size(1), :] language_model_inputs = self.language_projection(query_output) # unbatch the embeddings back by moving frames to seq-len language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) language_attention_mask = torch.ones( language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device ) if input_ids is None: start_tokens = [self.config.text_config.bos_token_id] if getattr(self.config, "video_token_index", None) is not None: start_tokens = [self.config.video_token_index] * self.config.num_query_tokens * 4 + start_tokens input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device) input_ids = input_ids.repeat(batch_size, 1) if attention_mask is None: attention_mask = torch.ones_like(input_ids) inputs_embeds = self.get_input_embeddings()(input_ids) # if the model already has "video_token_index" then the input is expanded to account for image embeds # otherwise we expand manually by concatenating if getattr(self.config, "video_token_index", None) is not None: special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once( "Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. " "Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. " "Using processors without these attributes in the config is deprecated and will throw an error in v4.47." 
) inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat( [language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1 ) # add image_embeds length to max_length, so that the final max_length is counted only on token embeds # -1 is to account for the prepended BOS after `generate`. if not self.language_model.config.is_encoder_decoder: generate_kwargs["max_length"] = ( generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1 ) generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1] inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask} if not self.language_model.config.is_encoder_decoder: inputs["input_ids"] = input_ids outputs = self.language_model.generate(**inputs, **generate_kwargs) return outputs
transformers/src/transformers/models/instructblipvideo/modular_instructblipvideo.py/0
{ "file_path": "transformers/src/transformers/models/instructblipvideo/modular_instructblipvideo.py", "repo_id": "transformers", "token_count": 9309 }
# coding=utf-8 # Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 LayoutLM model.""" from __future__ import annotations import math import warnings from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFMaskedLMOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_layoutlm import LayoutLMConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LayoutLMConfig" class TFLayoutLMEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.max_2d_position_embeddings = config.max_2d_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("x_position_embeddings"): self.x_position_embeddings = self.add_weight( name="embeddings", shape=[self.max_2d_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("y_position_embeddings"): self.y_position_embeddings = self.add_weight( name="embeddings", shape=[self.max_2d_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("h_position_embeddings"): self.h_position_embeddings = self.add_weight( name="embeddings", 
shape=[self.max_2d_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("w_position_embeddings"): self.w_position_embeddings = self.add_weight( name="embeddings", shape=[self.max_2d_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def call( self, input_ids: tf.Tensor = None, bbox: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) if bbox is None: bbox = tf.fill(input_shape + [4], value=0) try: left_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 0]) upper_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 1]) right_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 2]) lower_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 3]) except IndexError as e: raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e h_position_embeddings = tf.gather(self.h_position_embeddings, bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = tf.gather(self.w_position_embeddings, bbox[:, :, 2] - bbox[:, :, 0]) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = ( inputs_embeds + position_embeds + token_type_embeds + left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings ) final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->LayoutLM class TFLayoutLMSelfAttention(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
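# query_layer: (batch_size, num_heads, seq_len_q, head_size); key_layer: (batch_size, num_heads, seq_len_k, head_size)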
# (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in TFLayoutLMModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->LayoutLM class TFLayoutLMSelfOutput(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->LayoutLM class TFLayoutLMAttention(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFLayoutLMSelfAttention(config, name="self") self.dense_output = TFLayoutLMSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor,
encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->LayoutLM class TFLayoutLMIntermediate(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->LayoutLM class TFLayoutLMOutput(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->LayoutLM class TFLayoutLMLayer(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.attention = TFLayoutLMAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if 
self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFLayoutLMAttention(config, name="crossattention") self.intermediate = TFLayoutLMIntermediate(config, name="intermediate") self.bert_output = TFLayoutLMOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) # Copied from 
transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->LayoutLM class TFLayoutLMEncoder(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFLayoutLMLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->LayoutLM class TFLayoutLMPooler(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
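# hidden_states: (batch_size, seq_length, hidden_size) -> pooled_output: (batch_size, hidden_size), taken at the [CLS] position (index 0).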
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->LayoutLM class TFLayoutLMPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->LayoutLM class TFLayoutLMLMPredictionHead(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFLayoutLMPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
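# Keep a reference to the word-embedding layer so its weight matrix can be reused for the output projection (weight tying) in call().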
self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->LayoutLM class TFLayoutLMMLMHead(keras.layers.Layer): def __init__(self, config: LayoutLMConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFLayoutLMLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) @keras_serializable class TFLayoutLMMainLayer(keras.layers.Layer): config_class = LayoutLMConfig def __init__(self, config: LayoutLMConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFLayoutLMEmbeddings(config, name="embeddings") self.encoder = TFLayoutLMEncoder(config, name="encoder") self.pooler = TFLayoutLMPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, bbox: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if bbox is None: bbox = tf.fill(dims=input_shape + [4], value=0) embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
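# Cast the mask to the embeddings' dtype so the additive -10000.0 penalty below matches the model's compute precision (e.g. float16 under mixed precision).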
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, # Need to pass these required positional arguments to `Encoder` encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=None, past_key_values=None, use_cache=False, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFLayoutLMPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LayoutLMConfig base_model_prefix = "layoutlm" @property def input_signature(self): signature = super().input_signature signature["bbox"] = tf.TensorSpec(shape=(None, None, 4), dtype=tf.int32, name="bbox") return signature LAYOUTLM_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`LayoutLMConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ LAYOUTLM_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) bbox (`Numpy array` or `tf.Tensor` of shape `({0}, 4)`, *optional*): Bounding Boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings- 1]`. attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLM_START_DOCSTRING, ) class TFLayoutLMModel(TFLayoutLMPreTrainedModel): def __init__(self, config: LayoutLMConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.layoutlm = TFLayoutLMMainLayer(config, name="layoutlm") @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings( output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC ) def call( self, input_ids: TFModelInputType | None = None, bbox: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFLayoutLMModel >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] >>> token_boxes = [] >>> for word, box in zip(words, normalized_word_boxes): ... word_tokens = tokenizer.tokenize(word) ... token_boxes.extend([box] * len(word_tokens)) >>> # add bounding boxes of cls + sep tokens >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] >>> encoding = tokenizer(" ".join(words), return_tensors="tf") >>> input_ids = encoding["input_ids"] >>> attention_mask = encoding["attention_mask"] >>> token_type_ids = encoding["token_type_ids"] >>> bbox = tf.convert_to_tensor([token_boxes]) >>> outputs = model( ... input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids ... 
) >>> last_hidden_states = outputs.last_hidden_state ```""" outputs = self.layoutlm( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlm", None) is not None: with tf.name_scope(self.layoutlm.name): self.layoutlm.build(None) @add_start_docstrings("""LayoutLM Model with a `language modeling` head on top.""", LAYOUTLM_START_DOCSTRING) class TFLayoutLMForMaskedLM(TFLayoutLMPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"cls.seq_relationship", r"cls.predictions.decoder.weight", r"nsp___cls", ] def __init__(self, config: LayoutLMConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning( "If you want to use `TFLayoutLMForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name="layoutlm") self.mlm = TFLayoutLMMLMHead(config, input_embeddings=self.layoutlm.embeddings, name="mlm___cls") def get_lm_head(self) -> keras.layers.Layer: return self.mlm.predictions def get_prefix_bias_name(self) -> str: warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, bbox: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFLayoutLMForMaskedLM >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMForMaskedLM.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "[MASK]"] >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] >>> token_boxes = [] >>> for word, box in zip(words, normalized_word_boxes): ... word_tokens = tokenizer.tokenize(word) ... 
token_boxes.extend([box] * len(word_tokens)) >>> # add bounding boxes of cls + sep tokens >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] >>> encoding = tokenizer(" ".join(words), return_tensors="tf") >>> input_ids = encoding["input_ids"] >>> attention_mask = encoding["attention_mask"] >>> token_type_ids = encoding["token_type_ids"] >>> bbox = tf.convert_to_tensor([token_boxes]) >>> labels = tokenizer("Hello world", return_tensors="tf")["input_ids"] >>> outputs = model( ... input_ids=input_ids, ... bbox=bbox, ... attention_mask=attention_mask, ... token_type_ids=token_type_ids, ... labels=labels, ... ) >>> loss = outputs.loss ```""" outputs = self.layoutlm( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.mlm(sequence_output=sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlm", None) is not None: with tf.name_scope(self.layoutlm.name): self.layoutlm.build(None) if getattr(self, "mlm", None) is not None: with tf.name_scope(self.mlm.name): self.mlm.build(None) @add_start_docstrings( """ LayoutLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, LAYOUTLM_START_DOCSTRING, ) class TFLayoutLMForSequenceClassification(TFLayoutLMPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: LayoutLMConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.layoutlm = TFLayoutLMMainLayer(config, name="layoutlm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, bbox: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFLayoutLMForSequenceClassification >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] >>> token_boxes = [] >>> for word, box in zip(words, normalized_word_boxes): ... word_tokens = tokenizer.tokenize(word) ... token_boxes.extend([box] * len(word_tokens)) >>> # add bounding boxes of cls + sep tokens >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] >>> encoding = tokenizer(" ".join(words), return_tensors="tf") >>> input_ids = encoding["input_ids"] >>> attention_mask = encoding["attention_mask"] >>> token_type_ids = encoding["token_type_ids"] >>> bbox = tf.convert_to_tensor([token_boxes]) >>> sequence_label = tf.convert_to_tensor([1]) >>> outputs = model( ... input_ids=input_ids, ... bbox=bbox, ... attention_mask=attention_mask, ... token_type_ids=token_type_ids, ... labels=sequence_label, ... 
) >>> loss = outputs.loss >>> logits = outputs.logits ```""" outputs = self.layoutlm( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlm", None) is not None: with tf.name_scope(self.layoutlm.name): self.layoutlm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, LAYOUTLM_START_DOCSTRING, ) class TFLayoutLMForTokenClassification(TFLayoutLMPreTrainedModel, TFTokenClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship", ] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: LayoutLMConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name="layoutlm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, bbox: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
Returns: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFLayoutLMForTokenClassification >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782] >>> token_boxes = [] >>> for word, box in zip(words, normalized_word_boxes): ... word_tokens = tokenizer.tokenize(word) ... token_boxes.extend([box] * len(word_tokens)) >>> # add bounding boxes of cls + sep tokens >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]] >>> encoding = tokenizer(" ".join(words), return_tensors="tf") >>> input_ids = encoding["input_ids"] >>> attention_mask = encoding["attention_mask"] >>> token_type_ids = encoding["token_type_ids"] >>> bbox = tf.convert_to_tensor([token_boxes]) >>> token_labels = tf.convert_to_tensor([1, 1, 0, 0]) >>> outputs = model( ... input_ids=input_ids, ... bbox=bbox, ... attention_mask=attention_mask, ... token_type_ids=token_type_ids, ... labels=token_labels, ... ) >>> loss = outputs.loss >>> logits = outputs.logits ```""" outputs = self.layoutlm( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(inputs=sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlm", None) is not None: with tf.name_scope(self.layoutlm.name): self.layoutlm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ LayoutLM Model with a span classification head on top for extractive question-answering tasks such as [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the final hidden-states output to compute `span start logits` and `span end logits`). """, LAYOUTLM_START_DOCSTRING, ) class TFLayoutLMForQuestionAnswering(TFLayoutLMPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [ r"pooler", r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship", ] def __init__(self, config: LayoutLMConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name="layoutlm") self.qa_outputs = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, bbox: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFLayoutLMForQuestionAnswering >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", add_prefix_space=True) >>> model = TFLayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa", revision="1e3ebac") >>> dataset = load_dataset("nielsr/funsd", split="train", trust_remote_code=True) >>> example = dataset[0] >>> question = "what's his name?" >>> words = example["words"] >>> boxes = example["bboxes"] >>> encoding = tokenizer( ... question.split(), words, is_split_into_words=True, return_token_type_ids=True, return_tensors="tf" ... ) >>> bbox = [] >>> for i, s, w in zip(encoding.input_ids[0], encoding.sequence_ids(0), encoding.word_ids(0)): ... if s == 1: ... bbox.append(boxes[w]) ... elif i == tokenizer.sep_token_id: ... bbox.append([1000] * 4) ... else: ... 
bbox.append([0] * 4) >>> encoding["bbox"] = tf.convert_to_tensor([bbox]) >>> word_ids = encoding.word_ids(0) >>> outputs = model(**encoding) >>> loss = outputs.loss >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits >>> start, end = word_ids[tf.math.argmax(start_scores, -1)[0]], word_ids[tf.math.argmax(end_scores, -1)[0]] >>> print(" ".join(words[start : end + 1])) M. Hamann P. Harper, P. Martinez ```""" outputs = self.layoutlm( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlm", None) is not None: with tf.name_scope(self.layoutlm.name): self.layoutlm.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) __all__ = [ "TFLayoutLMForMaskedLM", "TFLayoutLMForSequenceClassification", "TFLayoutLMForTokenClassification", "TFLayoutLMForQuestionAnswering", "TFLayoutLMMainLayer", "TFLayoutLMModel", "TFLayoutLMPreTrainedModel", ]
transformers/src/transformers/models/layoutlm/modeling_tf_layoutlm.py/0
{ "file_path": "transformers/src/transformers/models/layoutlm/modeling_tf_layoutlm.py", "repo_id": "transformers", "token_count": 31749 }
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 LayoutLMv3 model.""" from __future__ import annotations import collections import math from typing import List, Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from .configuration_layoutlmv3 import LayoutLMv3Config _CONFIG_FOR_DOC = "LayoutLMv3Config" _DUMMY_INPUT_IDS = [ [7, 6, 1], [1, 2, 0], ] _DUMMY_BBOX = [ [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]], ] LARGE_NEGATIVE = -1e8 class TFLayoutLMv3PatchEmbeddings(keras.layers.Layer): """LayoutLMv3 image (patch) embeddings.""" def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) patch_sizes = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) self.proj = keras.layers.Conv2D( filters=config.hidden_size, kernel_size=patch_sizes, strides=patch_sizes, padding="valid", data_format="channels_last", use_bias=True, kernel_initializer=get_initializer(config.initializer_range), name="proj", ) self.hidden_size = config.hidden_size self.num_patches = (config.input_size**2) // (patch_sizes[0] * patch_sizes[1]) self.config = config def call(self, pixel_values: tf.Tensor) -> tf.Tensor: # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1]) embeddings = self.proj(pixel_values) embeddings = tf.reshape(embeddings, (-1, self.num_patches, self.hidden_size)) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build([None, None, None, self.config.num_channels]) class TFLayoutLMv3TextEmbeddings(keras.layers.Layer): """ LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings. 
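    The final embedding is the sum of the word, token-type and 1-D position embeddings plus the spatial embedding,
    which is itself the concatenation of the six coordinate embeddings (x0, y0, x1, y1, height, width), followed by
    LayerNorm and dropout.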
""" def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.word_embeddings = keras.layers.Embedding( config.vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="word_embeddings", ) self.token_type_embeddings = keras.layers.Embedding( config.type_vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="token_type_embeddings", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.padding_token_index = config.pad_token_id self.position_embeddings = keras.layers.Embedding( config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="position_embeddings", ) self.x_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.coordinate_size, embeddings_initializer=get_initializer(config.initializer_range), name="x_position_embeddings", ) self.y_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.coordinate_size, embeddings_initializer=get_initializer(config.initializer_range), name="y_position_embeddings", ) self.h_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.shape_size, embeddings_initializer=get_initializer(config.initializer_range), name="h_position_embeddings", ) self.w_position_embeddings = keras.layers.Embedding( config.max_2d_position_embeddings, config.shape_size, embeddings_initializer=get_initializer(config.initializer_range), name="w_position_embeddings", ) self.max_2d_positions = config.max_2d_position_embeddings self.config = config def calculate_spatial_position_embeddings(self, bbox: tf.Tensor) -> tf.Tensor: try: left_position_ids = bbox[:, :, 0] upper_position_ids = bbox[:, :, 1] right_position_ids = bbox[:, :, 2] lower_position_ids = bbox[:, :, 3] except IndexError as exception: raise IndexError("Bounding box is not of shape (batch_size, seq_length, 4).") from exception try: left_position_embeddings = self.x_position_embeddings(left_position_ids) upper_position_embeddings = self.y_position_embeddings(upper_position_ids) right_position_embeddings = self.x_position_embeddings(right_position_ids) lower_position_embeddings = self.y_position_embeddings(lower_position_ids) except IndexError as exception: raise IndexError( f"The `bbox` coordinate values should be within 0-{self.max_2d_positions} range." ) from exception max_position_id = self.max_2d_positions - 1 h_position_embeddings = self.h_position_embeddings( tf.clip_by_value(bbox[:, :, 3] - bbox[:, :, 1], 0, max_position_id) ) w_position_embeddings = self.w_position_embeddings( tf.clip_by_value(bbox[:, :, 2] - bbox[:, :, 0], 0, max_position_id) ) # LayoutLMv1 sums the spatial embeddings, but LayoutLMv3 concatenates them. spatial_position_embeddings = tf.concat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], axis=-1, ) return spatial_position_embeddings def create_position_ids_from_inputs_embeds(self, inputs_embds: tf.Tensor) -> tf.Tensor: """ We are provided embeddings directly. We cannot infer which are padded, so just generate sequential position ids. 
""" input_shape = tf.shape(inputs_embds) sequence_length = input_shape[1] start_index = self.padding_token_index + 1 end_index = self.padding_token_index + sequence_length + 1 position_ids = tf.range(start_index, end_index, dtype=tf.int32) batch_size = input_shape[0] position_ids = tf.reshape(position_ids, (1, sequence_length)) position_ids = tf.tile(position_ids, (batch_size, 1)) return position_ids def create_position_ids_from_input_ids(self, input_ids: tf.Tensor) -> tf.Tensor: """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_token_index + 1. """ mask = tf.cast(tf.not_equal(input_ids, self.padding_token_index), input_ids.dtype) position_ids = tf.cumsum(mask, axis=1) * mask position_ids = position_ids + self.padding_token_index return position_ids def create_position_ids(self, input_ids: tf.Tensor, inputs_embeds: tf.Tensor) -> tf.Tensor: if input_ids is None: return self.create_position_ids_from_inputs_embeds(inputs_embeds) else: return self.create_position_ids_from_input_ids(input_ids) def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, training: bool = False, ) -> tf.Tensor: if position_ids is None: position_ids = self.create_position_ids(input_ids, inputs_embeds) if input_ids is not None: input_shape = tf.shape(input_ids) else: input_shape = tf.shape(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.word_embeddings.input_dim) inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox) embeddings += spatial_position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "word_embeddings", None) is not None: with tf.name_scope(self.word_embeddings.name): self.word_embeddings.build(None) if getattr(self, "token_type_embeddings", None) is not None: with tf.name_scope(self.token_type_embeddings.name): self.token_type_embeddings.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "position_embeddings", None) is not None: with tf.name_scope(self.position_embeddings.name): self.position_embeddings.build(None) if getattr(self, "x_position_embeddings", None) is not None: with tf.name_scope(self.x_position_embeddings.name): self.x_position_embeddings.build(None) if getattr(self, "y_position_embeddings", None) is not None: with tf.name_scope(self.y_position_embeddings.name): self.y_position_embeddings.build(None) if getattr(self, "h_position_embeddings", None) is not None: with tf.name_scope(self.h_position_embeddings.name): self.h_position_embeddings.build(None) if getattr(self, "w_position_embeddings", None) is not None: with tf.name_scope(self.w_position_embeddings.name): self.w_position_embeddings.build(None) class TFLayoutLMv3SelfAttention(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): 
super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.attention_score_normaliser = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias self.config = config def transpose_for_scores(self, x: tf.Tensor): shape = tf.shape(x) new_shape = ( shape[0], # batch_size shape[1], # seq_length self.num_attention_heads, self.attention_head_size, ) x = tf.reshape(x, new_shape) return tf.transpose(x, perm=[0, 2, 1, 3]) # batch_size, num_heads, seq_length, attention_head_size def cogview_attention(self, attention_scores: tf.Tensor, alpha: Union[float, int] = 32): """ https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation (PB-Relax). A replacement of the original keras.layers.Softmax(axis=-1)(attention_scores). Seems the new attention_probs will result in a slower speed and a little bias. Can use tf.debugging.assert_near(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better. """ scaled_attention_scores = attention_scores / alpha max_value = tf.expand_dims(tf.reduce_max(scaled_attention_scores, axis=-1), axis=-1) new_attention_scores = (scaled_attention_scores - max_value) * alpha return tf.math.softmax(new_attention_scores, axis=-1) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None, head_mask: tf.Tensor | None, output_attentions: bool, rel_pos: tf.Tensor | None = None, rel_2d_pos: tf.Tensor | None = None, training: bool = False, ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(self.query(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. normalised_query_layer = query_layer / self.attention_score_normaliser transposed_key_layer = tf.transpose( key_layer, perm=[0, 1, 3, 2] ) # batch_size, num_heads, attention_head_size, seq_length attention_scores = tf.matmul(normalised_query_layer, transposed_key_layer) if self.has_relative_attention_bias and self.has_spatial_attention_bias: attention_scores += (rel_pos + rel_2d_pos) / self.attention_score_normaliser elif self.has_relative_attention_bias: attention_scores += rel_pos / self.attention_score_normaliser if attention_mask is not None: # Apply the attention mask (is precomputed for all layers in TFLayoutLMv3Model call() function) attention_scores += attention_mask # Normalize the attention scores to probabilities. 
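        # (The PB-Relax softmax below rescales by alpha and subtracts the per-row maximum before the softmax; it is
        # mathematically equivalent to a plain softmax but avoids overflow in reduced precision.)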
# Use the trick of CogView paper to stabilize training. attention_probs = self.cogview_attention(attention_scores) attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to. if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose( context_layer, perm=[0, 2, 1, 3] ) # batch_size, seq_length, num_heads, attention_head_size shape = tf.shape(context_layer) context_layer = tf.reshape( context_layer, (shape[0], shape[1], self.all_head_size) ) # batch_size, seq_length, num_heads * attention_head_size outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) # Copied from models.roberta.modeling_tf_roberta.TFRobertaSelfOutput class TFLayoutLMv3SelfOutput(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLayoutLMv3Attention(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.self_attention = TFLayoutLMv3SelfAttention(config, name="self") self.self_output = TFLayoutLMv3SelfOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None, head_mask: tf.Tensor | None, output_attentions: bool, rel_pos: tf.Tensor | None = None, rel_2d_pos: tf.Tensor | None = None, training: bool = False, ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]: self_outputs = self.self_attention( hidden_states, attention_mask, head_mask, output_attentions, rel_pos, rel_2d_pos, training=training, ) attention_output = self.self_output(self_outputs[0], hidden_states, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "self_output", None) is not None: 
with tf.name_scope(self.self_output.name): self.self_output.build(None) # Copied from models.roberta.modeling_tf_bert.TFRobertaIntermediate class TFLayoutLMv3Intermediate(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from models.roberta.modeling_tf_bert.TFRobertaOutput class TFLayoutLMv3Output(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLayoutLMv3Layer(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.attention = TFLayoutLMv3Attention(config, name="attention") self.intermediate = TFLayoutLMv3Intermediate(config, name="intermediate") self.bert_output = TFLayoutLMv3Output(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None, head_mask: tf.Tensor | None, output_attentions: bool, rel_pos: tf.Tensor | None = None, rel_2d_pos: tf.Tensor | None = None, training: bool = False, ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]: self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, training=training, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.bert_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + outputs return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: 
with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) class TFLayoutLMv3Encoder(keras.layers.Layer): def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFLayoutLMv3Layer(config, name=f"layer.{i}") for i in range(config.num_hidden_layers)] self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if self.has_relative_attention_bias: self.rel_pos_bins = config.rel_pos_bins self.max_rel_pos = config.max_rel_pos self.rel_pos_bias = keras.layers.Dense( units=config.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), use_bias=False, name="rel_pos_bias", ) if self.has_spatial_attention_bias: self.max_rel_2d_pos = config.max_rel_2d_pos self.rel_2d_pos_bins = config.rel_2d_pos_bins self.rel_pos_x_bias = keras.layers.Dense( units=config.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), use_bias=False, name="rel_pos_x_bias", ) self.rel_pos_y_bias = keras.layers.Dense( units=config.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), use_bias=False, name="rel_pos_y_bias", ) def relative_position_bucket(self, relative_positions: tf.Tensor, num_buckets: int, max_distance: int): # the negative relative positions are assigned to the interval [0, num_buckets / 2] # we deal with this by assigning absolute relative positions to the interval [0, num_buckets / 2] # and then offsetting the positive relative positions by num_buckets / 2 at the end num_buckets = num_buckets // 2 buckets = tf.abs(relative_positions) # half of the buckets are for exact increments in positions max_exact_buckets = num_buckets // 2 is_small = buckets < max_exact_buckets # the other half of the buckets are for logarithmically bigger bins in positions up to max_distance buckets_log_ratio = tf.math.log(tf.cast(buckets, tf.float32) / max_exact_buckets) distance_log_ratio = math.log(max_distance / max_exact_buckets) buckets_big_offset = ( buckets_log_ratio / distance_log_ratio * (num_buckets - max_exact_buckets) ) # scale is [0, num_buckets - max_exact_buckets] buckets_big = max_exact_buckets + buckets_big_offset # scale is [max_exact_buckets, num_buckets] buckets_big = tf.cast(buckets_big, buckets.dtype) buckets_big = tf.minimum(buckets_big, num_buckets - 1) return (tf.cast(relative_positions > 0, buckets.dtype) * num_buckets) + tf.where( is_small, buckets, buckets_big ) def _cal_pos_emb( self, dense_layer: keras.layers.Dense, position_ids: tf.Tensor, num_buckets: int, max_distance: int, ): rel_pos_matrix = tf.expand_dims(position_ids, axis=-2) - tf.expand_dims(position_ids, axis=-1) rel_pos = self.relative_position_bucket(rel_pos_matrix, num_buckets, max_distance) rel_pos_one_hot = tf.one_hot(rel_pos, depth=num_buckets, dtype=self.compute_dtype) embedding = dense_layer(rel_pos_one_hot) # batch_size, seq_length, seq_length, num_heads --> batch_size, num_heads, seq_length, seq_length embedding = tf.transpose(embedding, [0, 3, 1, 2]) embedding = tf.cast(embedding, dtype=self.compute_dtype) return embedding def _cal_1d_pos_emb(self, position_ids: tf.Tensor): return self._cal_pos_emb(self.rel_pos_bias, position_ids, self.rel_pos_bins, self.max_rel_pos) def _cal_2d_pos_emb(self, bbox: tf.Tensor): position_coord_x = bbox[:, :, 0] # left position_coord_y = bbox[:, :, 
3] # bottom rel_pos_x = self._cal_pos_emb( self.rel_pos_x_bias, position_coord_x, self.rel_2d_pos_bins, self.max_rel_2d_pos, ) rel_pos_y = self._cal_pos_emb( self.rel_pos_y_bias, position_coord_y, self.rel_2d_pos_bins, self.max_rel_2d_pos, ) rel_2d_pos = rel_pos_x + rel_pos_y return rel_2d_pos def call( self, hidden_states: tf.Tensor, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, position_ids: tf.Tensor | None = None, training: bool = False, ) -> Union[ TFBaseModelOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], ]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if return_dict: return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) else: return tuple( value for value in [hidden_states, all_hidden_states, all_self_attentions] if value is not None ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rel_pos_bias", None) is not None: with tf.name_scope(self.rel_pos_bias.name): self.rel_pos_bias.build([None, None, self.rel_pos_bins]) if getattr(self, "rel_pos_x_bias", None) is not None: with tf.name_scope(self.rel_pos_x_bias.name): self.rel_pos_x_bias.build([None, None, self.rel_2d_pos_bins]) if getattr(self, "rel_pos_y_bias", None) is not None: with tf.name_scope(self.rel_pos_y_bias.name): self.rel_pos_y_bias.build([None, None, self.rel_2d_pos_bins]) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLayoutLMv3MainLayer(keras.layers.Layer): config_class = LayoutLMv3Config def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.config = config if config.text_embed: self.embeddings = TFLayoutLMv3TextEmbeddings(config, name="embeddings") if config.visual_embed: self.patch_embed = TFLayoutLMv3PatchEmbeddings(config, name="patch_embed") self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") if config.has_relative_attention_bias or config.has_spatial_attention_bias: image_size = config.input_size // config.patch_size self.init_visual_bbox(image_size=(image_size, image_size)) self.norm = keras.layers.LayerNormalization(epsilon=1e-6, name="norm") self.encoder = TFLayoutLMv3Encoder(config, name="encoder") def build(self, input_shape=None): if self.config.visual_embed: image_size = self.config.input_size // self.config.patch_size self.cls_token = self.add_weight( 
shape=(1, 1, self.config.hidden_size), initializer="zeros", trainable=True, dtype=tf.float32, name="cls_token", ) self.pos_embed = self.add_weight( shape=(1, image_size * image_size + 1, self.config.hidden_size), initializer="zeros", trainable=True, dtype=tf.float32, name="pos_embed", ) if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "patch_embed", None) is not None: with tf.name_scope(self.patch_embed.name): self.patch_embed.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build([None, None, self.config.hidden_size]) def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings.word_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.word_embeddings.weight = value # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def init_visual_bbox(self, image_size: Tuple[int, int], max_len: int = 1000): # We should not hardcode max_len to 1000, but it is done by the reference implementation, # so we keep it for compatibility with the pretrained weights. The more correct approach # would have been to pass on max_len=config.max_2d_position_embeddings - 1. 
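        # Each image patch is assigned a pseudo bounding box spanning its grid cell on a 0-`max_len` coordinate grid;
        # the [CLS] token gets the near-full-page box [1, 1, max_len - 1, max_len - 1] prepended below.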
height, width = image_size visual_bbox_x = tf.range(0, max_len * (width + 1), max_len) // width visual_bbox_x = tf.expand_dims(visual_bbox_x, axis=0) visual_bbox_x = tf.tile(visual_bbox_x, [width, 1]) # (width, width + 1) visual_bbox_y = tf.range(0, max_len * (height + 1), max_len) // height visual_bbox_y = tf.expand_dims(visual_bbox_y, axis=1) visual_bbox_y = tf.tile(visual_bbox_y, [1, height]) # (height + 1, height) visual_bbox = tf.stack( [visual_bbox_x[:, :-1], visual_bbox_y[:-1], visual_bbox_x[:, 1:], visual_bbox_y[1:]], axis=-1, ) visual_bbox = tf.reshape(visual_bbox, [-1, 4]) cls_token_box = tf.constant([[1, 1, max_len - 1, max_len - 1]], dtype=tf.int32) self.visual_bbox = tf.concat([cls_token_box, visual_bbox], axis=0) def calculate_visual_bbox(self, batch_size: int, dtype: tf.DType): visual_bbox = tf.expand_dims(self.visual_bbox, axis=0) visual_bbox = tf.tile(visual_bbox, [batch_size, 1, 1]) visual_bbox = tf.cast(visual_bbox, dtype=dtype) return visual_bbox def embed_image(self, pixel_values: tf.Tensor) -> tf.Tensor: embeddings = self.patch_embed(pixel_values) # add [CLS] token batch_size = tf.shape(embeddings)[0] cls_tokens = tf.tile(self.cls_token, [batch_size, 1, 1]) embeddings = tf.concat([cls_tokens, embeddings], axis=1) # add position embeddings if getattr(self, "pos_embed", None) is not None: embeddings += self.pos_embed embeddings = self.norm(embeddings) return embeddings def get_extended_attention_mask(self, attention_mask: tf.Tensor) -> tf.Tensor: # Adapted from transformers.modelling_utils.ModuleUtilsMixin.get_extended_attention_mask n_dims = len(attention_mask.shape) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if n_dims == 3: extended_attention_mask = tf.expand_dims(attention_mask, axis=1) elif n_dims == 2: # Provided a padding mask of dimensions [batch_size, seq_length]. # Make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]. extended_attention_mask = tf.expand_dims(attention_mask, axis=1) # (batch_size, 1, seq_length) extended_attention_mask = tf.expand_dims(extended_attention_mask, axis=1) # (batch_size, 1, 1, seq_length) else: raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape}).") # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, self.compute_dtype) extended_attention_mask = (1.0 - extended_attention_mask) * LARGE_NEGATIVE return extended_attention_mask def get_head_mask(self, head_mask: tf.Tensor | None) -> Union[tf.Tensor, List[tf.Tensor | None]]: if head_mask is None: return [None] * self.config.num_hidden_layers n_dims = tf.rank(head_mask) if n_dims == 1: # Gets a tensor with masks for each head (H). 
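            # Expand the (num_heads,) mask to rank 5 and tile it across layers, so it can be indexed per layer and
            # broadcast over the batch and sequence dimensions.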
head_mask = tf.expand_dims(head_mask, axis=0) # 1, num_heads head_mask = tf.expand_dims(head_mask, axis=0) # 1, 1, num_heads head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1 head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1, 1 head_mask = tf.tile( head_mask, [self.config.num_hidden_layers, 1, 1, 1, 1] ) # seq_length, 1, num_heads, 1, 1 elif n_dims == 2: # Gets a tensor with masks for each layer (L) and head (H). head_mask = tf.expand_dims(head_mask, axis=1) # seq_length, 1, num_heads head_mask = tf.expand_dims(head_mask, axis=-1) # seq_length, 1, num_heads, 1 head_mask = tf.expand_dims(head_mask, axis=-1) # seq_length, 1, num_heads, 1, 1 elif n_dims != 5: raise ValueError(f"Wrong shape for head_mask (shape {head_mask.shape}).") assert tf.rank(head_mask) == 5, f"Got head_mask rank of {tf.rank(head_mask)}, but require 5." head_mask = tf.cast(head_mask, self.compute_dtype) return head_mask @unpack_inputs def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[ TFBaseModelOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], ]: # This method can be called with a variety of modalities: # 1. text + layout # 2. text + layout + image # 3. image # The complexity of this method is mostly just due to handling of these different modalities. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if input_ids is not None: input_shape = tf.shape(input_ids) batch_size = input_shape[0] seq_length = input_shape[1] elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds) batch_size = input_shape[0] seq_length = input_shape[1] elif pixel_values is not None: batch_size = tf.shape(pixel_values)[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values") # Determine which integer dtype to use. 
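        # The default tensors created below (attention_mask, token_type_ids, bbox, position ids) must share the
        # integer dtype of whichever inputs were provided; fall back to tf.int32 when none of them carry one.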
if input_ids is not None: int_dtype = input_ids.dtype elif bbox is not None: int_dtype = bbox.dtype elif attention_mask is not None: int_dtype = attention_mask.dtype elif token_type_ids is not None: int_dtype = token_type_ids.dtype else: int_dtype = tf.int32 if input_ids is not None or inputs_embeds is not None: if attention_mask is None: attention_mask = tf.ones((batch_size, seq_length), dtype=int_dtype) if token_type_ids is None: token_type_ids = tf.zeros((batch_size, seq_length), dtype=int_dtype) if bbox is None: bbox = tf.zeros((batch_size, seq_length, 4), dtype=int_dtype) embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) final_bbox = None final_position_ids = None if pixel_values is not None: # embed image visual_embeddings = self.embed_image(pixel_values) # calculate attention mask visual_attention_mask = tf.ones((batch_size, tf.shape(visual_embeddings)[1]), dtype=int_dtype) if attention_mask is None: attention_mask = visual_attention_mask else: attention_mask = tf.concat([attention_mask, visual_attention_mask], axis=1) # calculate bounding boxes if self.config.has_spatial_attention_bias: visual_bbox = self.calculate_visual_bbox(batch_size, int_dtype) if bbox is None: final_bbox = visual_bbox else: final_bbox = tf.concat([bbox, visual_bbox], axis=1) # calculate position IDs if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: visual_position_ids = tf.range(0, tf.shape(visual_embeddings)[1], dtype=int_dtype) visual_position_ids = tf.expand_dims(visual_position_ids, axis=0) visual_position_ids = tf.tile(visual_position_ids, [batch_size, 1]) if input_ids is not None or inputs_embeds is not None: position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0) position_ids = tf.tile(position_ids, [batch_size, 1]) final_position_ids = tf.concat([position_ids, visual_position_ids], axis=1) else: final_position_ids = visual_position_ids # calculate embeddings if input_ids is None and inputs_embeds is None: embedding_output = visual_embeddings else: embedding_output = tf.concat([embedding_output, visual_embeddings], axis=1) embedding_output = self.LayerNorm(embedding_output) embedding_output = self.dropout(embedding_output, training=training) elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: if self.config.has_relative_attention_bias: position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0) position_ids = tf.tile(position_ids, [batch_size, 1]) final_position_ids = position_ids if self.config.has_spatial_attention_bias: final_bbox = bbox extended_attention_mask = self.get_extended_attention_mask(attention_mask) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x seq_length x seq_length # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask) encoder_outputs = self.encoder( embedding_output, bbox=final_bbox, position_ids=final_position_ids, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return 
TFBaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class TFLayoutLMv3PreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LayoutLMv3Config
    base_model_prefix = "layoutlmv3"

    @property
    def input_signature(self):
        sig = super().input_signature
        sig["bbox"] = tf.TensorSpec((None, None, 4), tf.int32, name="bbox")
        return sig


LAYOUTLMV3_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a
    regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. Initializing
            with a config file does not load the weights associated with the model, only the configuration. Check out
            the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

LAYOUTLMV3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
            token. See `pixel_values` for `patch_sequence_length`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids) bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / config.patch_size) * (width / config.patch_size))`. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3Model(TFLayoutLMv3PreTrainedModel): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[ TFBaseModelOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], ]: r""" Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModel >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModel.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True) >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf") >>> outputs = model(**encoding) >>> last_hidden_states = outputs.last_hidden_state ```""" outputs = self.layoutlmv3( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) class TFLayoutLMv3ClassificationHead(keras.layers.Layer): """ Head for sentence-level classification tasks. 
Reference: RobertaClassificationHead """ def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, activation="tanh", kernel_initializer=get_initializer(config.initializer_range), name="dense", ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout( classifier_dropout, name="dropout", ) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj", ) self.config = config def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor: outputs = self.dropout(inputs, training=training) outputs = self.dense(outputs) outputs = self.dropout(outputs, training=training) outputs = self.out_proj(outputs) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for document image classification tasks such as the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset. """, LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3ForSequenceClassification(TFLayoutLMv3PreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(config, **kwargs) self.config = config self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, labels: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, bbox: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[ TFSequenceClassifierOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor], ]: """ Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification >>> from datasets import load_dataset >>> import tensorflow as tf >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True) >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf") >>> sequence_label = tf.convert_to_tensor([1]) >>> outputs = model(**encoding, labels=sequence_label) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, training=training, ) sequence_output = outputs[0][:, 0, :] logits = self.classifier(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g. 
for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda). """, LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3ForTokenClassification(TFLayoutLMv3PreTrainedModel, TFTokenClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(config, **kwargs) self.num_labels = config.num_labels self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") if config.num_labels < 10: self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) else: self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier") self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, bbox: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, labels: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[ TFTokenClassifierOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor], ]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModelForTokenClassification >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7) >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True) >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> word_labels = example["ner_tags"] >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="tf") >>> outputs = model(**encoding) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values, training=training, ) if input_ids is not None: input_shape = tf.shape(input_ids) else: input_shape = tf.shape(inputs_embeds)[:-1] seq_length = input_shape[1] # only take the text part of the output representations sequence_output = outputs[0][:, :seq_length] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to compute `span start logits` and `span end logits`). """, LAYOUTLMV3_START_DOCSTRING, ) class TFLayoutLMv3ForQuestionAnswering(TFLayoutLMv3PreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"position_ids"] def __init__(self, config: LayoutLMv3Config, **kwargs): super().__init__(config, **kwargs) self.num_labels = config.num_labels self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") self.qa_outputs = TFLayoutLMv3ClassificationHead(config, name="qa_outputs") @unpack_inputs @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, start_positions: tf.Tensor | None = None, end_positions: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, bbox: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[ TFQuestionAnsweringModelOutput, Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor], ]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Examples: ```python >>> from transformers import AutoProcessor, TFAutoModelForQuestionAnswering >>> from datasets import load_dataset >>> import tensorflow as tf >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = TFAutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True) >>> example = dataset[0] >>> image = example["image"] >>> question = "what's his name?" 
>>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="tf") >>> start_positions = tf.convert_to_tensor([1]) >>> end_positions = tf.convert_to_tensor([3]) >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output, training=training) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions, "end_position": end_positions} loss = self.hf_compute_loss(labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layoutlmv3", None) is not None: with tf.name_scope(self.layoutlmv3.name): self.layoutlmv3.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build(None) __all__ = [ "TFLayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForSequenceClassification", "TFLayoutLMv3ForTokenClassification", "TFLayoutLMv3Model", "TFLayoutLMv3PreTrainedModel", ]
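# The task-specific classes above all declare `_keys_to_ignore_on_load_unexpected = [r"position_ids"]`
# so that checkpoints trained with the PyTorch LayoutLMv3 implementation load cleanly into these TF
# classes. A minimal sketch of that cross-framework loading path (the checkpoint name and label count
# are illustrative, taken from the docstring examples above):
#
#     from transformers import TFLayoutLMv3ForTokenClassification
#
#     # `from_pt=True` converts the PyTorch weights on the fly; checkpoint entries matched by
#     # `_keys_to_ignore_on_load_unexpected` (here `position_ids`) are dropped without warnings.
#     model = TFLayoutLMv3ForTokenClassification.from_pretrained(
#         "microsoft/layoutlmv3-base", from_pt=True, num_labels=7
#     )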
transformers/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py/0
{ "file_path": "transformers/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py", "repo_id": "transformers", "token_count": 34281 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LeViT checkpoints from timm.""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger() def convert_weight_and_push( hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True ): print(f"Converting {name}...") with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": from_model = timm.create_model("levit_128s", pretrained=True) else: from_model = timm.create_model("levit_128", pretrained=True) if hidden_sizes == 192: from_model = timm.create_model("levit_192", pretrained=True) if hidden_sizes == 256: from_model = timm.create_model("levit_256", pretrained=True) if hidden_sizes == 384: from_model = timm.create_model("levit_384", pretrained=True) from_model.eval() our_model = LevitForImageClassificationWithTeacher(config).eval() huggingface_weights = OrderedDict() weights = from_model.state_dict() og_keys = list(from_model.state_dict().keys()) new_keys = list(our_model.state_dict().keys()) print(len(og_keys), len(new_keys)) for i in range(len(og_keys)): huggingface_weights[new_keys[i]] = weights[og_keys[i]] our_model.load_state_dict(huggingface_weights) x = torch.randn((2, 3, 224, 224)) out1 = from_model(x) out2 = our_model(x).logits assert torch.allclose(out1, out2), "The model logits don't match the original one." 
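        # Note: the renaming above is purely positional -- the i-th timm parameter is copied into the
        # i-th HF parameter -- so the length print and the logits check above are the only guards
        # against the two state dicts enumerating their weights in a different order.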
checkpoint_name = name print(checkpoint_name) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name) image_processor = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name) print(f"Pushed {checkpoint_name}") def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 expected_shape = (1, num_labels) repo_id = "huggingface/label-files" num_labels = num_labels id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_hidden_sizes = { "levit-128S": 128, "levit-128": 128, "levit-192": 192, "levit-256": 256, "levit-384": 384, } names_to_config = { "levit-128S": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), "levit-128": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), "levit-192": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), "levit-256": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), "levit-384": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub) return config, expected_shape if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,", ) parser.add_argument( "--pytorch_dump_folder_path", default="levit-dump-folder/", type=Path, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
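# A minimal sketch of how this script is typically invoked and how the converted checkpoint can be
# loaded afterwards (folder layout follows the argparse defaults above; the model name is one of the
# supported LeViT variants). Note that, as written, `--push_to_hub` saves the model and image
# processor into the dump folder rather than uploading them.
#
#     python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
#
#     # then, in Python:
#     from transformers import LevitForImageClassificationWithTeacher, LevitImageProcessor
#
#     model = LevitForImageClassificationWithTeacher.from_pretrained("levit-dump-folder/levit-128S")
#     image_processor = LevitImageProcessor.from_pretrained("levit-dump-folder/levit-128S")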
transformers/src/transformers/models/levit/convert_levit_timm_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/levit/convert_levit_timm_to_pytorch.py", "repo_id": "transformers", "token_count": 2739 }
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import glob import torch from huggingface_hub import file_exists, hf_hub_download, snapshot_download from safetensors import safe_open from transformers import ( AddedToken, AutoConfig, AutoImageProcessor, AutoTokenizer, LlavaConfig, LlavaForConditionalGeneration, LlavaProcessor, SiglipVisionConfig, ) EPILOG_TXT = """Example: python transformers/src/transformers/models/llava/convert_llava_weights_to_hf.py --text_model_id lmsys/vicuna-7b-v1.5 --vision_model_id openai/clip-vit-large-patch14-336 --output_hub_path org/llava-v1.5-7b-conv --old_state_dict_id liuhaotian/llava-v1.5-7b Example for creating the old state dict file with Python: import torch from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM # load model kwargs = {"device_map": "auto", "torch_dtype": torch.float16} model = LlavaLlamaForCausalLM.from_pretrained("liuhaotian/llava-v1.5-7b", low_cpu_mem_usage=True, **kwargs) # load vision tower model.get_vision_tower().load_model() # Save state dict torch.save(model.state_dict(), "tmp/hf_models/llava-v1.5-7b/model_state_dict.bin") """ KEYS_TO_MODIFY_MAPPING = { "model.vision_tower.": "", ".vision_resampler": "", # all lmms-lab models do avg pooling, so no vision_resampler "model.mm_projector": "multi_modal_projector", "model": "model.model", "vision_model.model": "vision_model", "lm_head": "language_model.lm_head", "model.model": "language_model.model", "multi_modal_projector.0": "multi_modal_projector.linear_1", "multi_modal_projector.2": "multi_modal_projector.linear_2", } def load_original_state_dict(model_id): directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"]) original_state_dict = {} for path in glob.glob(f"{directory_path}/*"): if path.endswith(".safetensors"): with safe_open(path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) # tied wieghts so lm.head is not saved. 
Let's clone to load state dict if "lm_head.weight" not in original_state_dict: original_state_dict["lm_head.weight"] = original_state_dict["model.embed_tokens.weight"].clone() if "model.image_newline" in original_state_dict: # not used in the original implementation because "merge_type=flat" del original_state_dict["model.image_newline"] return original_state_dict # used only for llava-interlave # for ex: Qwen/Qwen1.5-0.5B-Chat google/siglip-so400m-patch14-384 lmms-lab/llava-next-interleave-qwen-0.5b def convert_state_dict_to_hf(state_dict): new_state_dict = {} for key, value in state_dict.items(): if key.endswith(".inv_freq"): continue for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) new_state_dict[key] = value return new_state_dict def convert_llava_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id): torch.set_default_dtype(torch.float16) text_config = AutoConfig.from_pretrained(text_model_id) tokenizer = AutoTokenizer.from_pretrained(text_model_id) tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True) if "Qwen" not in text_model_id: # qwen already has a pad token tokenizer.add_special_tokens({"pad_token": "<pad>"}) image_processor = AutoImageProcessor.from_pretrained(vision_model_id) processor = LlavaProcessor(tokenizer=tokenizer, image_processor=image_processor) if "siglip" in vision_model_id: vision_config = SiglipVisionConfig( hidden_size=1152, image_size=384, intermediate_size=4304, num_attention_heads=16, num_hidden_layers=26, patch_size=14, vision_use_head=False, ).to_dict() else: vision_config = None config = LlavaConfig( text_config=text_config, vision_config=vision_config, ) # llms-lab interleeave models do not use any selection startegy except for last hidden state if "Qwen" in text_model_id: config.image_token_index = 151646 if "siglip" in vision_model_id: config.vision_feature_select_strategy = "full" config.vision_feature_layer = -1 else: config.pad_token_id = 32001 config.image_token_index = 32000 with torch.device("meta"): model = LlavaForConditionalGeneration(config) # Some llava variants like microsoft/llava-med-v1.5-mistral-7b use safetensors to store weights if file_exists(old_state_dict_id, "model_state_dict.bin"): state_dict_path = hf_hub_download(old_state_dict_id, "model_state_dict.bin") state_dict = torch.load(state_dict_path, map_location="cpu", weights_only=True) else: state_dict = load_original_state_dict(old_state_dict_id) state_dict = convert_state_dict_to_hf(state_dict) model.load_state_dict(state_dict, strict=True, assign=True) pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data mu = torch.mean(pre_expansion_embeddings, dim=0).float() n = pre_expansion_embeddings.size()[0] sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma) # We add an image token so we resize the model and pad to 64 for performance reasons pad_shape = 64 vocab_size = config.text_config.vocab_size model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape) model.language_model.model.embed_tokens.weight.data[vocab_size:] = torch.stack( tuple( (dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[vocab_size:].shape[0])) ), dim=0, ) model.language_model.lm_head.weight.data[vocab_size:] = torch.stack( tuple((dist.sample() for _ in 
range(model.language_model.lm_head.weight.data[vocab_size:].shape[0]))), dim=0, ) model.push_to_hub(output_hub_path) processor.push_to_hub(output_hub_path) def main(): parser = argparse.ArgumentParser( epilog=EPILOG_TXT, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( "--text_model_id", help="Hub location of the text model", ) parser.add_argument( "--vision_model_id", help="Hub location of the vision model", ) parser.add_argument( "--output_hub_path", help="Location on the hub of the converted model", ) parser.add_argument( "--old_state_dict_id", help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`", ) args = parser.parse_args() convert_llava_llama_to_hf(args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id) if __name__ == "__main__": main()
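# The embedding-resize step in `convert_llava_llama_to_hf` initializes the newly added rows (the
# <image> token and, when needed, <pad>) by sampling from a multivariate normal fitted to the
# pretrained embedding matrix instead of using the default random init. A self-contained sketch of
# the same idea (shapes are illustrative and much smaller than the real embedding matrix):
#
#     import torch
#
#     old_embeddings = torch.randn(1000, 64)                      # pretrained rows
#     mu = old_embeddings.mean(dim=0).float()
#     centered = (old_embeddings - mu).float()
#     sigma = centered.T @ centered / old_embeddings.size(0)      # empirical covariance
#     dist = torch.distributions.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
#     new_rows = torch.stack([dist.sample() for _ in range(2)])   # rows for the 2 new tokens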
transformers/src/transformers/models/llava/convert_llava_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/llava/convert_llava_weights_to_hf.py", "repo_id": "transformers", "token_count": 3201 }
# coding=utf-8 # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow Longformer model.""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_longformer import LongformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096" _CONFIG_FOR_DOC = "LongformerConfig" LARGE_NEGATIVE = -1e8 @dataclass class TFLongformerBaseModelOutput(ModelOutput): """ Base class for Longformer's outputs, with potential hidden states, local and global attentions. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. 
If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None global_attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFLongformerBaseModelOutputWithPooling(ModelOutput): """ Base class for Longformer's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. 
global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None global_attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFLongformerMaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. 
""" loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None global_attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFLongformerQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering Longformer models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: tf.Tensor | None = None start_logits: tf.Tensor = None end_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None global_attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFLongformerSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. 
Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None global_attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFLongformerMultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice models. Args: loss (`tf.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided): Classification loss. logits (`tf.Tensor` of shape `(batch_size, num_choices)`): *num_choices* is the second dimension of the input tensors. (see *input_ids* above). Classification scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None global_attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFLongformerTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : Classification loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where `x` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Those are the attention weights from every token in the sequence to every token with global attention (first `x` values) and to every token in the attention window (remaining `attention_window + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be accessed from `global_attentions`. global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None global_attentions: Tuple[tf.Tensor, ...] | None = None def _compute_global_attention_mask(input_ids_shape, sep_token_indices, before_sep_token=True): """ Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is True` else after `sep_token_id`. 
""" assert shape_list(sep_token_indices)[1] == 2, "`input_ids` should have two dimensions" question_end_index = tf.reshape(sep_token_indices, (input_ids_shape[0], 3, 2))[:, 0, 1][:, None] # bool attention mask with True in locations of global attention attention_mask = tf.expand_dims(tf.range(input_ids_shape[1], dtype=tf.int64), axis=0) attention_mask = tf.tile(attention_mask, (input_ids_shape[0], 1)) if before_sep_token is True: question_end_index = tf.tile(question_end_index, (1, input_ids_shape[1])) attention_mask = tf.cast(attention_mask < question_end_index, dtype=question_end_index.dtype) else: # last token is separation token and should not be counted and in the middle are two separation tokens question_end_index = tf.tile(question_end_index + 1, (1, input_ids_shape[1])) attention_mask = tf.cast( attention_mask > question_end_index, dtype=question_end_index.dtype, ) * tf.cast(attention_mask < input_ids_shape[-1], dtype=question_end_index.dtype) return attention_mask # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->Longformer class TFLongformerLMHead(keras.layers.Layer): """Longformer Head for masked language modeling.""" def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.act = get_tf_activation("gelu") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, value): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.layer_norm(hidden_states) # project back to size of vocabulary with bias seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states class TFLongformerEmbeddings(keras.layers.Layer): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing and some extra casting. 
""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.padding_idx = 1 self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: tf.Tensor Returns: tf.Tensor """ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype) incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask return incremental_indices + self.padding_idx def call( self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False, ): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.cast(tf.fill(dims=input_shape, value=0), tf.int64) if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. 
position_ids = self.create_position_ids_from_input_ids( input_ids=input_ids, past_key_values_length=past_key_values_length ) else: position_ids = tf.expand_dims( tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1, dtype=tf.int64), axis=0, ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Longformer class TFLongformerIntermediate(keras.layers.Layer): def __init__(self, config: LongformerConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Longformer class TFLongformerOutput(keras.layers.Layer): def __init__(self, config: LongformerConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Longformer class TFLongformerPooler(keras.layers.Layer): def __init__(self, config: LongformerConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
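        # In other words, the <s> (CLS-style) token: the shape goes from (batch_size, seq_len,
        # hidden_size) to (batch_size, hidden_size) before the dense + tanh projection below.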
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Longformer class TFLongformerSelfOutput(keras.layers.Layer): def __init__(self, config: LongformerConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLongformerSelfAttention(keras.layers.Layer): def __init__(self, config, layer_id, **kwargs): super().__init__(**kwargs) self.config = config if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_heads = config.num_attention_heads self.head_dim = int(config.hidden_size / config.num_attention_heads) self.embed_dim = config.hidden_size self.query = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="value", ) # separate projection layers for tokens with global attention self.query_global = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="query_global", ) self.key_global = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="key_global", ) self.value_global = keras.layers.Dense( self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="value_global", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.global_dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.layer_id = layer_id attention_window = config.attention_window[self.layer_id] assert ( attention_window % 2 == 0 ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}" assert ( attention_window > 0 ), f"`attention_window` for layer {self.layer_id} has to be positive. 
Given {attention_window}" self.one_sided_attn_window_size = attention_window // 2 def build(self, input_shape=None): if not self.built: with tf.name_scope("query_global"): self.query_global.build((self.config.hidden_size,)) with tf.name_scope("key_global"): self.key_global.build((self.config.hidden_size,)) with tf.name_scope("value_global"): self.value_global.build((self.config.hidden_size,)) if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "query_global", None) is not None: with tf.name_scope(self.query_global.name): self.query_global.build([None, None, self.config.hidden_size]) if getattr(self, "key_global", None) is not None: with tf.name_scope(self.key_global.name): self.key_global.build([None, None, self.config.hidden_size]) if getattr(self, "value_global", None) is not None: with tf.name_scope(self.value_global.name): self.value_global.build([None, None, self.config.hidden_size]) def call( self, inputs, training=False, ): """ LongformerSelfAttention expects *len(hidden_states)* to be multiple of *attention_window*. Padding to *attention_window* happens in LongformerModel.forward to avoid redoing the padding on each layer. The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to: - -10000: no attention - 0: local attention - +10000: global attention """ # retrieve input args ( hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ) = inputs # project hidden states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) batch_size, seq_len, embed_dim = shape_list(hidden_states) tf.debugging.assert_equal( embed_dim, self.embed_dim, message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}", ) # normalize query query_vectors /= tf.math.sqrt(tf.cast(self.head_dim, dtype=query_vectors.dtype)) query_vectors = tf.reshape(query_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) key_vectors = tf.reshape(key_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) # attn_probs = (batch_size, seq_len, num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for attention probs remove_from_windowed_attention_mask = attention_mask != 0 # cast to fp32/fp16 then replace 1's with -inf float_mask = tf.cast(remove_from_windowed_attention_mask, dtype=query_vectors.dtype) * LARGE_NEGATIVE # diagonal mask with zeros everywhere and -inf inplace of padding diagonal_mask = self._sliding_chunks_query_key_matmul( tf.ones(shape_list(attention_mask)), float_mask, self.one_sided_attn_window_size, ) # pad local attention probs attn_scores += diagonal_mask tf.debugging.assert_equal( shape_list(attn_scores), [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], message=( f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}" ), ) # compute global attn indices required through out forward fn ( 
max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) = self._get_global_attn_indices(is_index_global_attn) # this function is only relevant for global attention if is_global_attn: attn_scores = self._concat_with_global_key_attn_probs( attn_scores=attn_scores, query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) attn_probs = stable_softmax(attn_scores, axis=-1) # softmax sometimes inserts NaN if all positions are masked, replace them with 0 # Make sure to create a mask with the proper shape: # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1] # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1] if is_global_attn: masked_index = tf.tile( is_index_masked[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1), ) else: masked_index = tf.tile( is_index_masked[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1), ) attn_probs = tf.where( masked_index, tf.zeros(shape_list(masked_index), dtype=attn_probs.dtype), attn_probs, ) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs # apply dropout attn_probs = self.dropout(attn_probs, training=training) value_vectors = tf.reshape(value_vectors, (batch_size, seq_len, self.num_heads, self.head_dim)) # if global attention, compute sum of global and local attn if is_global_attn: attn_output = self._compute_attn_output_with_global_indices( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: attn_output = self._sliding_chunks_matmul_attn_probs_value( attn_probs, value_vectors, self.one_sided_attn_window_size ) tf.debugging.assert_equal( shape_list(attn_output), [batch_size, seq_len, self.num_heads, self.head_dim], message="Unexpected size" ) attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim)) # compute value for global attention and overwrite to attention output if is_global_attn: attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( attn_output=attn_output, hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, layer_head_mask=layer_head_mask, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, training=training, ) else: # Leave attn_output unchanged global_attn_probs = tf.zeros((batch_size, self.num_heads, max_num_global_attn_indices, seq_len)) # make sure that local attention probabilities are set to 0 for indices of global attn # Make sure to create a mask with the proper shape: # if 
is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1] # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1] if is_global_attn: masked_global_attn_index = tf.tile( is_index_global_attn[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1), ) else: masked_global_attn_index = tf.tile( is_index_global_attn[:, :, None, None], (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1), ) attn_probs = tf.where( masked_global_attn_index, tf.zeros(shape_list(masked_global_attn_index), dtype=attn_probs.dtype), attn_probs, ) outputs = (attn_output, attn_probs, global_attn_probs) return outputs def _sliding_chunks_query_key_matmul(self, query, key, window_overlap): """ Matrix multiplication of query and key tensors using with a sliding window attention pattern. This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an overlap of size window_overlap """ batch_size, seq_len, num_heads, head_dim = shape_list(query) tf.debugging.assert_equal( seq_len % (window_overlap * 2), 0, message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}", ) tf.debugging.assert_equal( shape_list(query), shape_list(key), message=( f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:" f" {shape_list(key)}" ), ) chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 query = tf.reshape( tf.transpose(query, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim), ) key = tf.reshape(tf.transpose(key, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim)) chunked_query = self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap) # matrix multiplication # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap chunked_query = tf.cast(chunked_query, dtype=chunked_key.dtype) chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query, chunked_key) # multiply # convert diagonals into columns paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]]) diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(chunked_attention_scores, paddings) # allocate space for the overall attention matrix where the chunks are combined. The last dimension # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to # window_overlap previous words). The following column is attention score from each word to itself, then # followed by window_overlap columns for the upper triangle. 
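        # Illustrative sketch of that layout (values assumed, not executed): with window_overlap = 2, each query
        # position i ends up with 2 * window_overlap + 1 = 5 scores ordered as
        #   [score(i, i-2), score(i, i-1), score(i, i), score(i, i+1), score(i, i+2)]
        # i.e. window_overlap lower-triangle columns, the self-attention column, then window_overlap upper-triangle
        # columns.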
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions # - copying the main diagonal and the upper triangle # TODO: This code is most likely not very efficient and should be improved diagonal_attn_scores_up_triang = tf.concat( [ diagonal_chunked_attention_scores[:, :, :window_overlap, : window_overlap + 1], diagonal_chunked_attention_scores[:, -1:, window_overlap:, : window_overlap + 1], ], axis=1, ) # - copying the lower triangle diagonal_attn_scores_low_triang = tf.concat( [ tf.zeros( (batch_size * num_heads, 1, window_overlap, window_overlap), dtype=diagonal_chunked_attention_scores.dtype, ), diagonal_chunked_attention_scores[:, :, -(window_overlap + 1) : -1, window_overlap + 1 :], ], axis=1, ) diagonal_attn_scores_first_chunk = tf.concat( [ tf.roll( diagonal_chunked_attention_scores, shift=[1, window_overlap], axis=[2, 3], )[:, :, :window_overlap, :window_overlap], tf.zeros( (batch_size * num_heads, 1, window_overlap, window_overlap), dtype=diagonal_chunked_attention_scores.dtype, ), ], axis=1, ) first_chunk_mask = ( tf.tile( tf.range(chunks_count + 1, dtype=tf.int64)[None, :, None, None], (batch_size * num_heads, 1, window_overlap, window_overlap), ) < 1 ) diagonal_attn_scores_low_triang = tf.where( first_chunk_mask, diagonal_attn_scores_first_chunk, diagonal_attn_scores_low_triang, ) # merging upper and lower triangle diagonal_attention_scores = tf.concat( [diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang], axis=-1 ) # separate batch_size and num_heads dimensions again diagonal_attention_scores = tf.transpose( tf.reshape( diagonal_attention_scores, (batch_size, num_heads, seq_len, 2 * window_overlap + 1), ), (0, 2, 1, 3), ) diagonal_attention_scores = self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores @staticmethod def _mask_invalid_locations(input_tensor, window_overlap): # create correct upper triangle bool mask mask_2d_upper = tf.reverse( tf.linalg.band_part(tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0), axis=[0], ) # pad to full matrix padding = tf.convert_to_tensor( [[0, shape_list(input_tensor)[1] - window_overlap], [0, shape_list(input_tensor)[3] - window_overlap - 1]] ) # create lower mask mask_2d = tf.pad(mask_2d_upper, padding) # combine with upper mask mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1]) # broadcast to full matrix mask_4d = tf.tile(mask_2d[None, :, None, :], (shape_list(input_tensor)[0], 1, 1, 1)) # inf tensor used for masking inf_tensor = -float("inf") * tf.ones_like(input_tensor) # mask input_tensor = tf.where(tf.math.greater(mask_4d, 0), inf_tensor, input_tensor) return input_tensor def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_overlap): """ Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the same shape as `attn_probs` """ batch_size, seq_len, num_heads, head_dim = shape_list(value) tf.debugging.assert_equal( seq_len % (window_overlap * 2), 0, message="Seq_len has to be multiple of 2 * window_overlap" ) tf.debugging.assert_equal( shape_list(attn_probs)[:3], shape_list(value)[:3], message="value and attn_probs must have same dims (except head_dim)", ) tf.debugging.assert_equal( shape_list(attn_probs)[3], 2 * window_overlap + 1, message="attn_probs last dim has to be 2 * window_overlap + 1", ) chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap chunked_attn_probs = tf.reshape( tf.transpose(attn_probs, (0, 2, 1, 3)), ( batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1, ), ) # group batch_size and num_heads dimensions into one value = tf.reshape( tf.transpose(value, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim), ) # pad seq_len with w at the beginning of the sequence and another window overlap at the end paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap], [0, 0]]) padded_value = tf.pad(value, paddings, constant_values=-1) # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap frame_size = 3 * window_overlap * head_dim frame_hop_size = (shape_list(padded_value)[1] * head_dim - frame_size) // chunks_count chunked_value = tf.signal.frame( tf.reshape(padded_value, (batch_size * num_heads, -1)), frame_size, frame_hop_size, ) chunked_value = tf.reshape( chunked_value, (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim), ) tf.debugging.assert_equal( shape_list(chunked_value), [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim], message="Chunked value has the wrong shape", ) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value) context = tf.transpose( tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)), (0, 2, 1, 3), ) return context @staticmethod def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings): """pads rows and then flips rows and columns""" hidden_states_padded = tf.pad( hidden_states_padded, paddings ) # padding value is not important because it will be overwritten batch_size, chunk_size, seq_length, hidden_dim = shape_list(hidden_states_padded) hidden_states_padded = tf.reshape(hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length)) return hidden_states_padded @staticmethod def _pad_and_diagonalize(chunked_hidden_states): """ shift every row 1 step right, converting columns into diagonals. Example: ```python chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629, ] window_overlap = num_rows = 4 ``` (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] """ total_num_heads, num_chunks, window_overlap, hidden_dim = shape_list(chunked_hidden_states) paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0], [0, window_overlap + 1]]) chunked_hidden_states = tf.pad( chunked_hidden_states, paddings ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). 
Padding value is not important because it'll be overwritten chunked_hidden_states = tf.reshape( chunked_hidden_states, (total_num_heads, num_chunks, -1) ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :, :-window_overlap ] # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states = tf.reshape( chunked_hidden_states, (total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim), ) # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] return chunked_hidden_states @staticmethod def _chunk(hidden_states, window_overlap): """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" batch_size, seq_length, hidden_dim = shape_list(hidden_states) num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1 # define frame size and frame stride (similar to convolution) frame_hop_size = window_overlap * hidden_dim frame_size = 2 * frame_hop_size hidden_states = tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim)) # chunk with overlap chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size) tf.debugging.assert_equal( shape_list(chunked_hidden_states), [batch_size, num_output_chunks, frame_size], message=( "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension" f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}." ), ) chunked_hidden_states = tf.reshape( chunked_hidden_states, (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim), ) return chunked_hidden_states @staticmethod def _get_global_attn_indices(is_index_global_attn): """compute global attn indices required throughout forward pass""" # helper variable num_global_attn_indices = tf.math.count_nonzero(is_index_global_attn, axis=1) num_global_attn_indices = tf.cast(num_global_attn_indices, dtype=tf.constant(1).dtype) # max number of global attn indices in batch max_num_global_attn_indices = tf.reduce_max(num_global_attn_indices) # indices of global attn is_index_global_attn_nonzero = tf.where(is_index_global_attn) # helper variable is_local_index_global_attn = tf.range(max_num_global_attn_indices) < tf.expand_dims( num_global_attn_indices, axis=-1 ) # location of the non-padding values within global attention indices is_local_index_global_attn_nonzero = tf.where(is_local_index_global_attn) # location of the padding values within global attention indices is_local_index_no_global_attn_nonzero = tf.where(tf.math.logical_not(is_local_index_global_attn)) return ( max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) def _concat_with_global_key_attn_probs( self, attn_scores, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ): batch_size = shape_list(key_vectors)[0] # select global key vectors global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero) # create only global key vectors key_vectors_only_global = tf.scatter_nd( is_local_index_global_attn_nonzero, global_key_vectors, shape=( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim, ), ) # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = 
tf.einsum("blhd,bshd->blhs", query_vectors, key_vectors_only_global) # (batch_size, max_num_global_attn_indices, seq_len, num_heads) attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key, (0, 3, 1, 2)) mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple( shape_list(attn_probs_from_global_key_trans)[-2:] ) mask = tf.ones(mask_shape) * -10000.0 mask = tf.cast(mask, dtype=attn_probs_from_global_key_trans.dtype) # scatter mask attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update( attn_probs_from_global_key_trans, is_local_index_no_global_attn_nonzero, mask, ) # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans, (0, 2, 3, 1)) # concat to attn_probs # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1) return attn_scores def _compute_attn_output_with_global_indices( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, ): batch_size = shape_list(attn_probs)[0] # cut local attn probs to global only attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices] # select global value vectors global_value_vectors = tf.gather_nd(value_vectors, is_index_global_attn_nonzero) # create only global value vectors value_vectors_only_global = tf.scatter_nd( is_local_index_global_attn_nonzero, global_value_vectors, shape=( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim, ), ) # compute attn output only global attn_output_only_global = tf.einsum("blhs,bshd->blhd", attn_probs_only_global, value_vectors_only_global) # reshape attn probs attn_probs_without_global = attn_probs[:, :, :, max_num_global_attn_indices:] # compute attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global def _compute_global_attn_output_from_hidden( self, attn_output, hidden_states, max_num_global_attn_indices, layer_head_mask, is_local_index_global_attn_nonzero, is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, is_index_masked, training, ): batch_size, seq_len = shape_list(hidden_states)[:2] # prepare global hidden states global_attn_hidden_states = tf.gather_nd(hidden_states, is_index_global_attn_nonzero) global_attn_hidden_states = tf.scatter_nd( is_local_index_global_attn_nonzero, global_attn_hidden_states, shape=(batch_size, max_num_global_attn_indices, self.embed_dim), ) # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= tf.math.sqrt( tf.cast(self.head_dim, dtype=global_query_vectors_only_global.dtype) ) global_query_vectors_only_global = self.reshape_and_transpose(global_query_vectors_only_global, batch_size) global_key_vectors = self.reshape_and_transpose(global_key_vectors, batch_size) global_value_vectors = self.reshape_and_transpose(global_value_vectors, batch_size) # compute attn scores global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True) tf.debugging.assert_equal( shape_list(global_attn_scores), [batch_size * self.num_heads, 
max_num_global_attn_indices, seq_len], message=( "global_attn_scores have the wrong size. Size should be" f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" f" {shape_list(global_attn_scores)}." ), ) global_attn_scores = tf.reshape( global_attn_scores, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len), ) global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3)) mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple( shape_list(global_attn_scores_trans)[-2:] ) global_attn_mask = tf.ones(mask_shape) * -10000.0 global_attn_mask = tf.cast(global_attn_mask, dtype=global_attn_scores_trans.dtype) # scatter mask global_attn_scores_trans = tf.tensor_scatter_nd_update( global_attn_scores_trans, is_local_index_no_global_attn_nonzero, global_attn_mask, ) global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3)) # mask global attn scores attn_mask = tf.tile(is_index_masked[:, None, None, :], (1, shape_list(global_attn_scores)[1], 1, 1)) global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores) global_attn_scores = tf.reshape( global_attn_scores, (batch_size * self.num_heads, max_num_global_attn_indices, seq_len), ) # compute global attn probs global_attn_probs_float = stable_softmax(global_attn_scores, axis=-1) # apply layer head masking if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) global_attn_probs_float = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( global_attn_probs_float, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) ) global_attn_probs_float = tf.reshape( global_attn_probs_float, (batch_size * self.num_heads, max_num_global_attn_indices, seq_len) ) # dropout global_attn_probs = self.global_dropout(global_attn_probs_float, training=training) # global attn output global_attn_output = tf.matmul(global_attn_probs, global_value_vectors) tf.debugging.assert_equal( shape_list(global_attn_output), [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], message=( "global_attn_output tensor has the wrong size. Size should be" f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" f" {shape_list(global_attn_output)}." 
), ) global_attn_output = tf.reshape( global_attn_output, (batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim), ) # get only non zero global attn output nonzero_global_attn_output = tf.gather_nd( tf.transpose(global_attn_output, (0, 2, 1, 3)), is_local_index_global_attn_nonzero, ) nonzero_global_attn_output = tf.reshape( nonzero_global_attn_output, (shape_list(is_local_index_global_attn_nonzero)[0], -1), ) # overwrite values with global attention attn_output = tf.tensor_scatter_nd_update( attn_output, is_index_global_attn_nonzero, nonzero_global_attn_output ) global_attn_probs = tf.reshape( global_attn_probs, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) ) return attn_output, global_attn_probs def reshape_and_transpose(self, vector, batch_size): return tf.reshape( tf.transpose( tf.reshape(vector, (batch_size, -1, self.num_heads, self.head_dim)), (0, 2, 1, 3), ), (batch_size * self.num_heads, -1, self.head_dim), ) class TFLongformerAttention(keras.layers.Layer): def __init__(self, config, layer_id=0, **kwargs): super().__init__(**kwargs) self.self_attention = TFLongformerSelfAttention(config, layer_id, name="self") self.dense_output = TFLongformerSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call(self, inputs, training=False): ( hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ) = inputs self_outputs = self.self_attention( [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], training=training, ) attention_output = self.dense_output(self_outputs[0], hidden_states, training=training) outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFLongformerLayer(keras.layers.Layer): def __init__(self, config, layer_id=0, **kwargs): super().__init__(**kwargs) self.attention = TFLongformerAttention(config, layer_id, name="attention") self.intermediate = TFLongformerIntermediate(config, name="intermediate") self.longformer_output = TFLongformerOutput(config, name="output") def call(self, inputs, training=False): ( hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ) = inputs attention_outputs = self.attention( [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn], training=training, ) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.longformer_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "longformer_output", None) is not None: with tf.name_scope(self.longformer_output.name): self.longformer_output.build(None) class TFLongformerEncoder(keras.layers.Layer): 
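    # Stacks `config.num_hidden_layers` TFLongformerLayer blocks; `call` below optionally collects per-layer hidden
    # states, local attention probabilities and global attention probabilities, and strips the window padding from
    # the returned tensors.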
def __init__(self, config, **kwargs): super().__init__(**kwargs) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.layer = [TFLongformerLayer(config, i, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask=None, head_mask=None, padding_len=0, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = all_global_attentions = () if output_attentions else None for idx, layer_module in enumerate(self.layer): if output_hidden_states: hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states all_hidden_states = all_hidden_states + (hidden_states_to_add,) layer_outputs = layer_module( [ hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, is_index_masked, is_index_global_attn, is_global_attn, ], training=training, ) hidden_states = layer_outputs[0] if output_attentions: # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1) all_attentions = all_attentions + (tf.transpose(layer_outputs[1], (0, 2, 1, 3)),) # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn all_global_attentions = all_global_attentions + (tf.transpose(layer_outputs[2], (0, 1, 3, 2)),) # Add last layer if output_hidden_states: hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states all_hidden_states = all_hidden_states + (hidden_states_to_add,) # undo padding # unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1) hidden_states = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states if output_attentions: all_attentions = ( tuple([state[:, :, :-padding_len, :] for state in all_attentions]) if padding_len > 0 else all_attentions ) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None ) return TFLongformerBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, global_attentions=all_global_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLongformerMainLayer(keras.layers.Layer): config_class = LongformerConfig def __init__(self, config, add_pooling_layer=True, **kwargs): super().__init__(**kwargs) if isinstance(config.attention_window, int): assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value" assert config.attention_window > 0, "`config.attention_window` has to be positive" config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer else: assert len(config.attention_window) == config.num_hidden_layers, ( "`len(config.attention_window)` should equal `config.num_hidden_layers`. 
" f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}" ) self.config = config self.num_hidden_layers = config.num_hidden_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.pad_token_id = config.pad_token_id self.attention_window = config.attention_window self.embeddings = TFLongformerEmbeddings(config, name="embeddings") self.encoder = TFLongformerEncoder(config, name="encoder") self.pooler = TFLongformerPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids=None, attention_mask=None, head_mask=None, global_attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and not isinstance(input_ids, tf.Tensor): input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64) elif input_ids is not None: input_ids = tf.cast(input_ids, tf.int64) if attention_mask is not None and not isinstance(attention_mask, tf.Tensor): attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64) elif attention_mask is not None: attention_mask = tf.cast(attention_mask, tf.int64) if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor): global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64) elif global_attention_mask is not None: global_attention_mask = tf.cast(global_attention_mask, tf.int64) if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.cast(tf.fill(input_shape, 1), tf.int64) if token_type_ids is None: token_type_ids = tf.cast(tf.fill(input_shape, 0), tf.int64) # merge `global_attention_mask` and `attention_mask` if global_attention_mask is not None: attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask) ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) = self._pad_to_window_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.pad_token_id, ) # is index masked or global attention is_index_masked = tf.math.less(attention_mask, 1) is_index_global_attn = tf.math.greater(attention_mask, 1) is_global_attn = tf.math.reduce_any(is_index_global_attn) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, to_seq_length, 1, 1] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask_shape = shape_list(attention_mask) extended_attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], attention_mask_shape[1], 1, 1)) # Since attention_mask is 1.0 for positions we want to attend locally and 0.0 for # masked and global attn positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0 embedding_output = self.embeddings( input_ids, position_ids, token_type_ids, inputs_embeds, training=training, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, padding_len=padding_len, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFLongformerBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, global_attentions=encoder_outputs.global_attentions, ) def _pad_to_window_size( self, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, pad_token_id, ): """A helper function to pad tokens and mask to work with implementation of Longformer selfattention.""" # padding attention_window = ( self.attention_window if isinstance(self.attention_window, int) else max(self.attention_window) ) assert attention_window % 2 == 0, f"`attention_window` should be an even value. 
Given {attention_window}" input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds) batch_size, seq_len = input_shape[:2] padding_len = (attention_window - seq_len % attention_window) % attention_window paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]]) if input_ids is not None: input_ids = tf.pad(input_ids, paddings, constant_values=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings position_ids = tf.pad(position_ids, paddings, constant_values=pad_token_id) if inputs_embeds is not None: if padding_len > 0: input_ids_padding = tf.cast(tf.fill((batch_size, padding_len), self.pad_token_id), tf.int64) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = tf.concat([inputs_embeds, inputs_embeds_padding], axis=-2) attention_mask = tf.pad(attention_mask, paddings, constant_values=False) # no attention on the padding tokens token_type_ids = tf.pad(token_type_ids, paddings, constant_values=0) # pad with token_type_id = 0 return ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) @staticmethod def _merge_to_attention_mask(attention_mask: tf.Tensor, global_attention_mask: tf.Tensor): # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn) # (global_attention_mask + 1) => 1 for local attention, 2 for global attention # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention if attention_mask is not None: attention_mask = attention_mask * (global_attention_mask + 1) else: # simply use `global_attention_mask` as `attention_mask` # if no `attention_mask` is given attention_mask = global_attention_mask + 1 return attention_mask def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFLongformerPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LongformerConfig base_model_prefix = "longformer" @property def input_signature(self): sig = super().input_signature sig["global_attention_mask"] = tf.TensorSpec((None, None), tf.int32, name="global_attention_mask") return sig LONGFORMER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`LongformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LONGFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`np.ndarray` or `tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. global_attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the [Longformer paper](https://arxiv.org/abs/2004.05150) for more details. Mask values selected in `[0, 1]`: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. 
[What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Longformer Model outputting raw hidden-states without any specific head on top.", LONGFORMER_START_DOCSTRING, ) class TFLongformerModel(TFLongformerPreTrainedModel): """ This class copies code from [`TFRobertaModel`] and overwrites standard self-attention with longformer self-attention to provide the ability to process long sequences following the self-attention approach described in [Longformer: the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long documents without the O(n^2) increase in memory and compute. The self-attention module `TFLongformerSelfAttention` implemented here supports the combination of local and global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA kernel to be memory and compute efficient. 
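    A minimal usage sketch (illustrative only; the checkpoint name and the choice of putting global attention on the
    first token are assumptions, not requirements):

    ```python
    >>> import tensorflow as tf
    >>> from transformers import AutoTokenizer, TFLongformerModel

    >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
    >>> model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
    >>> # 1 -> global attention (here only on the first token), 0 -> local sliding-window attention
    >>> global_attention_mask = tf.concat(
    ...     [tf.ones_like(inputs["input_ids"][:, :1]), tf.zeros_like(inputs["input_ids"][:, 1:])], axis=-1
    ... )
    >>> outputs = model(**inputs, global_attention_mask=global_attention_mask)
    >>> last_hidden_state = outputs.last_hidden_state
    ```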
""" def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.longformer = TFLongformerMainLayer(config, name="longformer") @unpack_inputs @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, global_attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFLongformerBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.longformer( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "longformer", None) is not None: with tf.name_scope(self.longformer.name): self.longformer.build(None) @add_start_docstrings( """Longformer Model with a `language modeling` head on top.""", LONGFORMER_START_DOCSTRING, ) class TFLongformerForMaskedLM(TFLongformerPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer") self.lm_head = TFLongformerLMHead(config, self.longformer.embeddings, name="lm_head") def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="allenai/longformer-base-4096", output_type=TFLongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", expected_output="' Paris'", expected_loss=0.44, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, global_attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFLongformerMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.longformer( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFLongformerMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "longformer", None) is not None: with tf.name_scope(self.longformer.name): self.longformer.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) @add_start_docstrings( """ Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, LONGFORMER_START_DOCSTRING, ) class TFLongformerForQuestionAnswering(TFLongformerPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="allenai/longformer-large-4096-finetuned-triviaqa", output_type=TFLongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output="' puppet'", expected_loss=0.96, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, global_attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFLongformerQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. """ if input_ids is not None and not isinstance(input_ids, tf.Tensor): input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64) elif input_ids is not None: input_ids = tf.cast(input_ids, tf.int64) if attention_mask is not None and not isinstance(attention_mask, tf.Tensor): attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64) elif attention_mask is not None: attention_mask = tf.cast(attention_mask, tf.int64) if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor): global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64) elif global_attention_mask is not None: global_attention_mask = tf.cast(global_attention_mask, tf.int64) # set global attention on question tokens if global_attention_mask is None and input_ids is not None: if shape_list(tf.where(input_ids == self.config.sep_token_id))[0] != 3 * shape_list(input_ids)[0]: logger.warning( f"There should be exactly three separator tokens: {self.config.sep_token_id} in every sample for" " questions answering. You might also consider to set `global_attention_mask` manually in the" " forward function to avoid this. This is most likely an error. The global attention is disabled" " for this forward pass." ) global_attention_mask = tf.cast(tf.fill(shape_list(input_ids), value=0), tf.int64) else: logger.warning_once("Initializing global attention on question tokens...") # put global attention on all tokens until `config.sep_token_id` is reached sep_token_indices = tf.where(input_ids == self.config.sep_token_id) sep_token_indices = tf.cast(sep_token_indices, dtype=tf.int64) global_attention_mask = _compute_global_attention_mask(shape_list(input_ids), sep_token_indices) outputs = self.longformer( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFLongformerQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "longformer", None) is not None: with tf.name_scope(self.longformer.name): self.longformer.build(None) if 
getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) class TFLongformerClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) self.config = config def call(self, hidden_states, training=False): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) output = self.out_proj(hidden_states) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, LONGFORMER_START_DOCSTRING, ) class TFLongformerForSequenceClassification(TFLongformerPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer") self.classifier = TFLongformerClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLongformerSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, global_attention_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFLongformerSequenceClassifierOutput, Tuple[tf.Tensor]]: if input_ids is not None and not isinstance(input_ids, tf.Tensor): input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64) elif input_ids is not None: input_ids = tf.cast(input_ids, tf.int64) if attention_mask is not None and not isinstance(attention_mask, tf.Tensor): attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64) elif attention_mask is not None: attention_mask = tf.cast(attention_mask, tf.int64) if global_attention_mask is not None and 
not isinstance(global_attention_mask, tf.Tensor): global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64) elif global_attention_mask is not None: global_attention_mask = tf.cast(global_attention_mask, tf.int64) if global_attention_mask is None and input_ids is not None: logger.warning_once("Initializing global attention on CLS token...") # global attention on cls token global_attention_mask = tf.zeros_like(input_ids) updates = tf.ones(shape_list(input_ids)[0], dtype=tf.int64) indices = tf.pad( tensor=tf.expand_dims(tf.range(shape_list(input_ids)[0], dtype=tf.int64), axis=1), paddings=[[0, 0], [0, 1]], constant_values=0, ) global_attention_mask = tf.tensor_scatter_nd_update( global_attention_mask, indices, updates, ) outputs = self.longformer( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFLongformerSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "longformer", None) is not None: with tf.name_scope(self.longformer.name): self.longformer.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, LONGFORMER_START_DOCSTRING, ) class TFLongformerForMultipleChoice(TFLongformerPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.longformer = TFLongformerMainLayer(config, name="longformer") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "global_attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="global_attention_mask"), } @unpack_inputs @add_start_docstrings_to_model_forward( LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLongformerMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, global_attention_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFLongformerMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_global_attention_mask = ( tf.reshape(global_attention_mask, (-1, shape_list(global_attention_mask)[-1])) if global_attention_mask is not None else None ) flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.longformer( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, global_attention_mask=flat_global_attention_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFLongformerMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "longformer", None) is not None: with tf.name_scope(self.longformer.name): self.longformer.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, LONGFORMER_START_DOCSTRING, ) class TFLongformerForTokenClassification(TFLongformerPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.longformer = TFLongformerMainLayer(config=config, add_pooling_layer=False, name="longformer") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLongformerTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, global_attention_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.array, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFLongformerTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.longformer( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFLongformerTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "longformer", None) is not None: with tf.name_scope(self.longformer.name): self.longformer.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) __all__ = [ "TFLongformerForMaskedLM", "TFLongformerForMultipleChoice", "TFLongformerForQuestionAnswering", "TFLongformerForSequenceClassification", "TFLongformerForTokenClassification", "TFLongformerModel", "TFLongformerPreTrainedModel", "TFLongformerSelfAttention", ]
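# A minimal usage sketch for the task heads above, assuming the public
# "allenai/longformer-base-4096" checkpoint and an installed TensorFlow backend
# (pass `from_pt=True` to `from_pretrained` if the checkpoint only ships PyTorch
# weights). The sequence-classification head loaded from this base checkpoint is
# randomly initialized, so the output is only meant to illustrate shapes and the
# automatic global-attention behaviour.
if __name__ == "__main__":
    from transformers import AutoTokenizer, TFLongformerForSequenceClassification

    tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
    model = TFLongformerForSequenceClassification.from_pretrained("allenai/longformer-base-4096")

    inputs = tokenizer("Longformer handles long documents with sparse attention.", return_tensors="tf")
    # No `global_attention_mask` is passed, so `call` puts global attention on the
    # CLS token and logs "Initializing global attention on CLS token...".
    outputs = model(**inputs)
    print(outputs.logits.shape)  # (1, config.num_labels)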
transformers/src/transformers/models/longformer/modeling_tf_longformer.py/0
{ "file_path": "transformers/src/transformers/models/longformer/modeling_tf_longformer.py", "repo_id": "transformers", "token_count": 55386 }
# coding=utf-8 # Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LXMERT model.""" import math import os import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss, SmoothL1Loss from ...activations import ACT2FN, gelu from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_lxmert import LxmertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased" _CONFIG_FOR_DOC = "LxmertConfig" class GeLU(nn.Module): def __init__(self): super().__init__() def forward(self, x): return gelu(x) @dataclass class LxmertModelOutput(ModelOutput): """ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship" encoder") Args: language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the language encoder. vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the visual encoder. pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ language_output: Optional[torch.FloatTensor] = None vision_output: Optional[torch.FloatTensor] = None pooled_output: Optional[torch.FloatTensor] = None language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None language_attentions: Optional[Tuple[torch.FloatTensor]] = None vision_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LxmertForQuestionAnsweringOutput(ModelOutput): """ Output type of [`LxmertForQuestionAnswering`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.k. question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None question_answering_score: Optional[torch.FloatTensor] = None language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None language_attentions: Optional[Tuple[torch.FloatTensor]] = None vision_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LxmertForPreTrainingOutput(ModelOutput): """ Output type of [`LxmertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None cross_relationship_score: Optional[torch.FloatTensor] = None question_answering_score: Optional[torch.FloatTensor] = None language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None language_attentions: Optional[Tuple[torch.FloatTensor]] = None vision_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in [ "adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step", ] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model class LxmertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() 
            device = input_ids.device
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        seq_length = input_shape[1]

        position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
        position_ids = position_ids.unsqueeze(0).expand(input_shape)

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class LxmertAttention(nn.Module):
    def __init__(self, config, ctx_dim=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.head_size = self.num_attention_heads * self.attention_head_size

        # visual_dim = 2048
        if ctx_dim is None:
            ctx_dim = config.hidden_size
        self.query = nn.Linear(config.hidden_size, self.head_size)
        self.key = nn.Linear(ctx_dim, self.head_size)
        self.value = nn.Linear(ctx_dim, self.head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(context)
        mixed_value_layer = self.value(context)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the LxmertModel forward() function)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class LxmertAttentionOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LxmertCrossAttentionLayer(nn.Module): def __init__(self, config): super().__init__() self.att = LxmertAttention(config) self.output = LxmertAttentionOutput(config) def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions) if output_attentions: attention_probs = output[1] attention_output = self.output(output[0], input_tensor) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs class LxmertSelfAttentionLayer(nn.Module): def __init__(self, config): super().__init__() self.self = LxmertAttention(config) self.output = LxmertAttentionOutput(config) def forward(self, input_tensor, attention_mask, output_attentions=False): # Self attention attends to itself, thus keys and queries are the same (input_tensor). 
output = self.self( input_tensor, input_tensor, attention_mask, output_attentions=output_attentions, ) if output_attentions: attention_probs = output[1] attention_output = self.output(output[0], input_tensor) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs class LxmertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class LxmertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LxmertLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = LxmertSelfAttentionLayer(config) self.intermediate = LxmertIntermediate(config) self.output = LxmertOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs[1:] # add attentions if we output them return outputs class LxmertXLayer(nn.Module): def __init__(self, config): super().__init__() # The cross-attention Layer self.visual_attention = LxmertCrossAttentionLayer(config) # Self-attention Layers self.lang_self_att = LxmertSelfAttentionLayer(config) self.visn_self_att = LxmertSelfAttentionLayer(config) # Intermediate and Output Layers (FFNs) self.lang_inter = LxmertIntermediate(config) self.lang_output = LxmertOutput(config) self.visn_inter = LxmertIntermediate(config) self.visn_output = LxmertOutput(config) def cross_att( self, lang_input, lang_attention_mask, visual_input, visual_attention_mask, output_x_attentions=False, ): # Cross Attention lang_att_output = self.visual_attention( lang_input, visual_input, ctx_att_mask=visual_attention_mask, output_attentions=output_x_attentions, ) visual_att_output = self.visual_attention( visual_input, lang_input, ctx_att_mask=lang_attention_mask, output_attentions=False, ) return lang_att_output, visual_att_output def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask): # Self Attention lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False) visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False) return lang_att_output[0], visual_att_output[0] def output_fc(self, lang_input, visual_input): # FC layers lang_inter_output = self.lang_inter(lang_input) visual_inter_output = self.visn_inter(visual_input) # Layer output lang_output = self.lang_output(lang_inter_output, lang_input) visual_output = self.visn_output(visual_inter_output, visual_input) return lang_output, visual_output def forward( self, lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions=False, ): 
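        # The cross-modality block below runs in three stages: (1) bi-directional
        # cross-attention between the language and vision streams, (2) independent
        # self-attention within each stream, and (3) a separate feed-forward
        # (intermediate + output) block per stream.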
lang_att_output, visual_att_output = self.cross_att( lang_input=lang_feats, lang_attention_mask=lang_attention_mask, visual_input=visual_feats, visual_attention_mask=visual_attention_mask, output_x_attentions=output_attentions, ) attention_probs = lang_att_output[1:] lang_att_output, visual_att_output = self.self_att( lang_att_output[0], lang_attention_mask, visual_att_output[0], visual_attention_mask, ) lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output) return ( ( lang_output, visual_output, attention_probs[0], ) if output_attentions else (lang_output, visual_output) ) class LxmertVisualFeatureEncoder(nn.Module): def __init__(self, config): super().__init__() feat_dim = config.visual_feat_dim pos_dim = config.visual_pos_dim # Object feature encoding self.visn_fc = nn.Linear(feat_dim, config.hidden_size) self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12) # Box position encoding self.box_fc = nn.Linear(pos_dim, config.hidden_size) self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, visual_feats, visual_pos): x = self.visn_fc(visual_feats) x = self.visn_layer_norm(x) y = self.box_fc(visual_pos) y = self.box_layer_norm(y) output = (x + y) / 2 output = self.dropout(output) return output class LxmertEncoder(nn.Module): def __init__(self, config): super().__init__() # Obj-level image embedding layer self.visn_fc = LxmertVisualFeatureEncoder(config) self.config = config # Number of layers self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers # Layers # Using self.layer instead of self.l_layer to support loading BERT weights. self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)]) self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)]) self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)]) def forward( self, lang_feats, lang_attention_mask, visual_feats, visual_pos, visual_attention_mask=None, output_attentions=None, ): vision_hidden_states = () language_hidden_states = () vision_attentions = () if output_attentions or self.config.output_attentions else None language_attentions = () if output_attentions or self.config.output_attentions else None cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None visual_feats = self.visn_fc(visual_feats, visual_pos) # Run language layers for layer_module in self.layer: l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions) lang_feats = l_outputs[0] language_hidden_states = language_hidden_states + (lang_feats,) if language_attentions is not None: language_attentions = language_attentions + (l_outputs[1],) # Run relational layers for layer_module in self.r_layers: v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions) visual_feats = v_outputs[0] vision_hidden_states = vision_hidden_states + (visual_feats,) if vision_attentions is not None: vision_attentions = vision_attentions + (v_outputs[1],) # Run cross-modality layers for layer_module in self.x_layers: x_outputs = layer_module( lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions=output_attentions, ) lang_feats, visual_feats = x_outputs[:2] vision_hidden_states = vision_hidden_states + (visual_feats,) language_hidden_states = language_hidden_states + (lang_feats,) if 
cross_encoder_attentions is not None: cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) visual_encoder_outputs = ( vision_hidden_states, vision_attentions if output_attentions else None, ) lang_encoder_outputs = ( language_hidden_states, language_attentions if output_attentions else None, ) return ( visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None, ) class LxmertPooler(nn.Module): def __init__(self, config): super(LxmertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LxmertPredictionHeadTransform(nn.Module): def __init__(self, config): super(LxmertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = ACT2FN[config.hidden_act] self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class LxmertLMPredictionHead(nn.Module): def __init__(self, config, lxmert_model_embedding_weights): super(LxmertLMPredictionHead, self).__init__() self.transform = LxmertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear( lxmert_model_embedding_weights.size(1), lxmert_model_embedding_weights.size(0), bias=False, ) self.decoder.weight = lxmert_model_embedding_weights self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0))) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class LxmertVisualAnswerHead(nn.Module): def __init__(self, config, num_labels): super().__init__() hid_dim = config.hidden_size self.logit_fc = nn.Sequential( nn.Linear(hid_dim, hid_dim * 2), GeLU(), nn.LayerNorm(hid_dim * 2, eps=1e-12), nn.Linear(hid_dim * 2, num_labels), ) def forward(self, hidden_states): return self.logit_fc(hidden_states) class LxmertVisualObjHead(nn.Module): def __init__(self, config): super().__init__() self.transform = LxmertPredictionHeadTransform(config) # Decide the use of visual losses visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} if config.visual_attr_loss: visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} if config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, } self.visual_losses = visual_losses # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder_dict = nn.ModuleDict( {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses} ) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) output = {} for key in self.visual_losses: output[key] = self.decoder_dict[key](hidden_states) return output class LxmertPreTrainingHeads(nn.Module): def __init__(self, config, lxmert_model_embedding_weights): super(LxmertPreTrainingHeads, self).__init__() self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class LxmertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LxmertConfig load_tf_weights = load_tf_weights_in_lxmert base_model_prefix = "lxmert" _supports_param_buffer_assignment = False def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) LXMERT_START_DOCSTRING = r""" The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer model, pretrained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LxmertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LXMERT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
        [What are input IDs?](../glossary#input-ids)
        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
            This input represents visual features. They are the RoI-pooled object features extracted from
            bounding boxes by a Faster R-CNN model. These are currently not provided by the transformers library.
        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
            This input represents spatial features corresponding to their relative (via index) visual features.
            The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale
            of 0 to 1. These are currently not provided by the transformers library.
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        visual_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in
            `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" @add_start_docstrings( "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", LXMERT_START_DOCSTRING, ) class LxmertModel(LxmertPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = LxmertEmbeddings(config) self.encoder = LxmertEncoder(config) self.pooler = LxmertPooler(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LxmertModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, visual_feats: Optional[torch.FloatTensor] = None, visual_pos: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[LxmertModelOutput, Tuple[torch.FloatTensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if visual_feats is None: raise ValueError("`visual_feats` cannot be `None`") if visual_pos is None: raise ValueError("`visual_pos` cannot be `None`") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min # Process the visual attention mask if visual_attention_mask is not None: extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2) extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype) extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min else: extended_visual_attention_mask = None # Positional Word Embeddings embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds) # Run Lxmert encoder encoder_outputs = self.encoder( embedding_output, extended_attention_mask, visual_feats=visual_feats, visual_pos=visual_pos, visual_attention_mask=extended_visual_attention_mask, output_attentions=output_attentions, ) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = ( language_attentions, vision_attentions, cross_encoder_attentions, ) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return LxmertModelOutput( pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, ) @add_start_docstrings( """Lxmert Model with a specified pretraining head on top.""", LXMERT_START_DOCSTRING, ) class LxmertForPreTraining(LxmertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight"] def __init__(self, config): super().__init__(config) # Configuration self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Use of pretraining tasks self.task_mask_lm = config.task_mask_lm self.task_obj_predict = config.task_obj_predict self.task_matched = config.task_matched self.task_qa = config.task_qa # Lxmert backbone self.lxmert = LxmertModel(config) # Pre-training heads self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight) if self.task_obj_predict: self.obj_predict_head = LxmertVisualObjHead(config) if self.task_qa: self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels) # Weight initialization # Initialize weights and apply final processing self.post_init() # Loss functions self.loss_fcts = { "l2": SmoothL1Loss(reduction="none"), "visual_ce": CrossEntropyLoss(reduction="none"), "ce": CrossEntropyLoss(), } visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = { "shape": (-1,), "num": config.num_object_labels, "loss": "visual_ce", } if config.visual_attr_loss: visual_losses["attr"] = { "shape": (-1,), "num": config.num_attr_labels, "loss": 
"visual_ce", } if config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, "loss": "l2", } self.visual_losses = visual_losses def resize_token_embeddings( self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True ) -> nn.Embedding: # Adding the following steps to resize bias to match the shape of resized embeddings new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self.cls.predictions.bias = self._resize_bias(self.cls.predictions.bias, new_num_tokens) return new_embeddings def _resize_bias(self, bias, new_num_tokens: int): old_num_tokens = bias.shape[0] if new_num_tokens <= old_num_tokens: new_bias = bias[:new_num_tokens] else: extra_bias = torch.zeros(new_num_tokens - old_num_tokens, device=bias.device) new_bias = torch.cat([bias, extra_bias]) new_bias = nn.Parameter(new_bias) return new_bias def resize_num_qa_labels(self, num_labels): """ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size will add newly initialized weights. Reducing the size will remove weights from the end Args: num_labels (`int`, *optional*): New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just returns a pointer to the qa labels ``torch.nn.Linear``` module of the model without doing anything. Return: `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer """ cur_qa_logit_layer = self.get_qa_logit_layer() if num_labels is None or cur_qa_logit_layer is None: return new_qa_logit_layer = self._resize_qa_labels(num_labels) self.config.num_qa_labels = num_labels self.num_qa_labels = num_labels return new_qa_logit_layer def _resize_qa_labels(self, num_labels): cur_qa_logit_layer = self.get_qa_logit_layer() new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels) self._set_qa_logit_layer(new_qa_logit_layer) return self.get_qa_logit_layer() def get_qa_logit_layer(self) -> nn.Module: """ Returns the linear layer that produces question answering logits. Returns: `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT does not have a visual answering head. 
""" if hasattr(self, "answer_head"): return self.answer_head.logit_fc[-1] def _set_qa_logit_layer(self, qa_logit_layer): self.answer_head.logit_fc[-1] = qa_logit_layer def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels): if num_labels is None: return cur_qa_logit_layer cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size() if cur_qa_labels == num_labels: return cur_qa_logit_layer # Build new linear output if getattr(cur_qa_logit_layer, "bias", None) is not None: new_qa_logit_layer = nn.Linear(hidden_dim, num_labels) else: new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False) new_qa_logit_layer.to(cur_qa_logit_layer.weight.device) # initialize all new labels self._init_weights(new_qa_logit_layer) # Copy labels from the previous weights num_labels_to_copy = min(cur_qa_labels, num_labels) new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :] if getattr(cur_qa_logit_layer, "bias", None) is not None: new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy] return new_qa_logit_layer @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, visual_feats: Optional[torch.FloatTensor] = None, visual_pos: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, obj_labels: Optional[Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]] = None, matched_label: Optional[torch.LongTensor] = None, ans: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[LxmertForPreTrainingOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` obj_labels (`Dict[Str: Tuple[Torch.FloatTensor, Torch.FloatTensor]]`, *optional*): each key is named after each one of the visual losses and each element of the tuple is of the shape `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for each the label id and the label score respectively matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the whether or not the text input matches the image (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates that the sentence does not match the image, - 1 indicates that the sentence does match the image. 
ans (`Torch.Tensor` of shape `(batch_size)`, *optional*): a one hot representation hof the correct answer *optional* Returns: """ if "masked_lm_labels" in kwargs: warnings.warn( "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`" " instead.", FutureWarning, ) labels = kwargs.pop("masked_lm_labels") return_dict = return_dict if return_dict is not None else self.config.use_return_dict device = input_ids.device if input_ids is not None else inputs_embeds.device lxmert_output = self.lxmert( input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos, token_type_ids=token_type_ids, attention_mask=attention_mask, visual_attention_mask=visual_attention_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) lang_output, visual_output, pooled_output = ( lxmert_output[0], lxmert_output[1], lxmert_output[2], ) lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) if self.task_qa: answer_score = self.answer_head(pooled_output) else: answer_score = pooled_output[0][0] total_loss = ( None if (labels is None and matched_label is None and obj_labels is None and ans is None) else torch.tensor(0.0, device=device) ) if labels is not None and self.task_mask_lm: masked_lm_loss = self.loss_fcts["ce"]( lang_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1), ) total_loss += masked_lm_loss if matched_label is not None and self.task_matched: matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1)) total_loss += matched_loss if obj_labels is not None and self.task_obj_predict: total_visual_loss = torch.tensor(0.0, device=input_ids.device) visual_prediction_scores_dict = self.obj_predict_head(visual_output) for key, key_info in self.visual_losses.items(): label, mask_conf = obj_labels[key] output_dim = key_info["num"] loss_fct_name = key_info["loss"] label_shape = key_info["shape"] weight = self.visual_loss_normalizer visual_loss_fct = self.loss_fcts[loss_fct_name] visual_prediction_scores = visual_prediction_scores_dict[key] visual_loss = visual_loss_fct( visual_prediction_scores.view(-1, output_dim), label.view(label_shape), ) if visual_loss.dim() > 1: # Regression Losses visual_loss = visual_loss.mean(1) visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight total_visual_loss += visual_loss total_loss += total_visual_loss if ans is not None and self.task_qa: answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1)) total_loss += answer_loss if not return_dict: output = ( lang_prediction_scores, cross_relationship_score, answer_score, ) + lxmert_output[3:] return ((total_loss,) + output) if total_loss is not None else output return LxmertForPreTrainingOutput( loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) @add_start_docstrings( """Lxmert Model with a visual-answering head on top for downstream QA tasks""", LXMERT_START_DOCSTRING, ) class LxmertForQuestionAnswering(LxmertPreTrainedModel): def __init__(self, config): super().__init__(config) # 
Configuration self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Lxmert backbone self.lxmert = LxmertModel(config) self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels) # Weight initialization # Initialize weights and apply final processing self.post_init() # Loss function self.loss = CrossEntropyLoss() def resize_num_qa_labels(self, num_labels): """ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size will add newly initialized weights. Reducing the size will remove weights from the end Args: num_labels (`int`, *optional*): New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just returns a pointer to the qa labels ``torch.nn.Linear``` module of the model without doing anything. Return: `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer """ cur_qa_logit_layer = self.get_qa_logit_layer() if num_labels is None or cur_qa_logit_layer is None: return new_qa_logit_layer = self._resize_qa_labels(num_labels) self.config.num_qa_labels = num_labels self.num_qa_labels = num_labels return new_qa_logit_layer def _resize_qa_labels(self, num_labels): cur_qa_logit_layer = self.get_qa_logit_layer() new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels) self._set_qa_logit_layer(new_qa_logit_layer) return self.get_qa_logit_layer() def get_qa_logit_layer(self) -> nn.Module: """ Returns the linear layer that produces question answering logits Returns: `nn.Module`: A torch module mapping the question answering prediction hidden states. `None`: A NoneType object if Lxmert does not have the visual answering head. 
""" if hasattr(self, "answer_head"): return self.answer_head.logit_fc[-1] def _set_qa_logit_layer(self, qa_logit_layer): self.answer_head.logit_fc[-1] = qa_logit_layer def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels): if num_labels is None: return cur_qa_logit_layer cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size() if cur_qa_labels == num_labels: return cur_qa_logit_layer # Build new linear output if getattr(cur_qa_logit_layer, "bias", None) is not None: new_qa_logit_layer = nn.Linear(hidden_dim, num_labels) else: new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False) new_qa_logit_layer.to(cur_qa_logit_layer.weight.device) # initialize all new labels self._init_weights(new_qa_logit_layer) # Copy labels from the previous weights num_labels_to_copy = min(cur_qa_labels, num_labels) new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :] if getattr(cur_qa_logit_layer, "bias", None) is not None: new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy] return new_qa_logit_layer @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=LxmertForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, visual_feats: Optional[torch.FloatTensor] = None, visual_pos: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[LxmertForQuestionAnsweringOutput, Tuple[torch.FloatTensor]]: r""" labels (`Torch.Tensor` of shape `(batch_size)`, *optional*): A one-hot representation of the correct answer """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict lxmert_output = self.lxmert( input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos, token_type_ids=token_type_ids, attention_mask=attention_mask, visual_attention_mask=visual_attention_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) pooled_output = lxmert_output[2] answer_score = self.answer_head(pooled_output) loss = None if labels is not None: loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1)) if not return_dict: output = (answer_score,) + lxmert_output[3:] return (loss,) + output if loss is not None else output return LxmertForQuestionAnsweringOutput( loss=loss, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) __all__ = [ "LxmertEncoder", "LxmertForPreTraining", "LxmertForQuestionAnswering", "LxmertModel", "LxmertPreTrainedModel", "LxmertVisualFeatureEncoder", "LxmertXLayer", ]
transformers/src/transformers/models/lxmert/modeling_lxmert.py/0
{ "file_path": "transformers/src/transformers/models/lxmert/modeling_lxmert.py", "repo_id": "transformers", "token_count": 28119 }
# coding=utf-8 # Copyright 2024 state-spaces/mamba2 org and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MAMBA2 model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from ...utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available from .configuration_mamba2 import Mamba2Config logger = logging.get_logger(__name__) if is_mamba_2_ssm_available(): from mamba_ssm.ops.triton.selective_state_update import selective_state_update from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined else: mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined, selective_state_update = None, None, None if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_update, causal_conv1d_fn = None, None is_fast_path_available = all( ( selective_state_update, mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined, causal_conv1d_fn, causal_conv1d_update, ) ) _CHECKPOINT_FOR_DOC = "mistralai/mamba-codestral-7B-v0.1" _CONFIG_FOR_DOC = "Mamba2Config" # Helper methods for segment sum computation def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int): """ Padding x tensor with `pad_size` on the seq_len dim (dim=1) Assumes that we only have tensors of either size 4 or 3 """ pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0) return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0) def reshape_into_chunks(input_tensor, pad_size, chunk_size): """ Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and simultaneously splitting it into chunk sequences. Assumes that we only have tensors of either size 4 or 3 """ # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...] input_tensor = pad_tensor_by_size(input_tensor, pad_size) if len(input_tensor.shape) == 3: # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads] return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2]) else: # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size] return input_tensor.reshape( input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3] ) def segment_sum(input_tensor): """ More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions. """ chunk_size = input_tensor.size(-1) # 1. 
expand input tensor to have an additional dimension and repeat along that dimension # [..., chunk_size] -> [..., chunk_size, chunk_size] input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size) # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1) input_tensor = input_tensor.masked_fill(~mask, 0) # 3. compute actual cumsum tensor_segsum = torch.cumsum(input_tensor, dim=-2) # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time) mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0) tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf) return tensor_segsum def apply_mask_to_padding_states(hidden_states, attention_mask): """ Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66 """ if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: dtype = hidden_states.dtype hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return hidden_states class Mamba2Cache: """ Arguments: config: Mamba2Config batch_size: int dtype: torch.dtype device: torch.device Attributes: dtype: (`torch.dtype`): The default `dtype` used to initializing the cache. conv_kernel_size: (`int`): Model's convolution kernel size taken from config. n_groups: (`int`): Model's number of groups taken from the config - similar to tensor parallel in Transformer. state_size: (`int`): Model's SSM state size taken from config. num_heads: (`int`): The number of heads used in the linear attention / SSM. head_dim: (`int`): The respective dimension of the heads used in the linear attention / SSM. intermediate_size: (`int`): Model's intermediate_size based on (expand * hidden_dim) from config. conv_states: (`torch.Tensor`): A tensor of shape `[num_layers, batch_size, conv_kernel_size, intermediate_size + 2 * n_groups * state_size]` that holds convolutional states. ssm_states: (`torch.Tensor`): A tensor of shape `[num_layers, batch_size, num_heads, head_dim, state_size]` that holds ssm states. 
""" def __init__( self, config: Mamba2Config, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None ): self.dtype = dtype self.conv_kernel_size = config.conv_kernel self.n_groups = config.n_groups self.state_size = config.state_size self.num_heads = config.num_heads self.head_dim = config.head_dim self.intermediate_size = int(config.expand * config.hidden_size) self.conv_states = torch.zeros( config.num_hidden_layers, batch_size, self.intermediate_size + 2 * self.n_groups * self.state_size, self.conv_kernel_size, device=device, dtype=dtype, ) self.ssm_states = torch.zeros( config.num_hidden_layers, batch_size, self.num_heads, self.head_dim, self.state_size, device=device, dtype=dtype, ) def update_conv_state( self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool = False ) -> torch.Tensor: if cache_init: self.conv_states[layer_idx] = new_conv_state.to(self.conv_states.device) else: self.conv_states[layer_idx] = self.conv_states[layer_idx].roll(shifts=-1, dims=-1) self.conv_states[layer_idx][:, :, -1] = new_conv_state[:, 0, :].to(self.conv_states.device) return self.conv_states[layer_idx] def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor): self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device) return self.ssm_states[layer_idx] def reset(self): self.conv_states.zero_() self.ssm_states.zero_() class MambaRMSNormGated(torch.nn.Module): def __init__(self, hidden_size, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states, gate=None): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) if gate is not None: hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32)) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class Mamba2Mixer(nn.Module): """ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. 
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4, and is why Mamba is called **selective** state spaces) """ def __init__(self, config: Mamba2Config, layer_idx: int): super().__init__() self.num_heads = config.num_heads self.hidden_size = config.hidden_size self.ssm_state_size = config.state_size self.conv_kernel_size = config.conv_kernel self.intermediate_size = int(config.expand * self.hidden_size) self.time_step_rank = int(config.time_step_rank) self.layer_idx = layer_idx self.use_conv_bias = config.use_conv_bias self.activation = config.hidden_act self.act = ACT2FN[config.hidden_act] self.layer_norm_epsilon = config.layer_norm_epsilon self.rms_norm = config.rms_norm self.n_groups = config.n_groups self.head_dim = config.head_dim self.chunk_size = config.chunk_size self.time_step_limit = config.time_step_limit self.time_step_min = config.time_step_min self.time_step_max = config.time_step_max self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size self.conv1d = nn.Conv1d( in_channels=self.conv_dim, out_channels=self.conv_dim, bias=config.use_conv_bias, kernel_size=config.conv_kernel, groups=self.conv_dim, padding=config.conv_kernel - 1, ) # projection of the input hidden states projection_size = self.intermediate_size + self.conv_dim + self.num_heads self.in_proj = nn.Linear( self.hidden_size, projection_size, bias=config.use_bias, ) # selective projection used to make dt, B and C input dependant # time step projection (discretization) # instantiate once and copy inv_dt in init_weights of PretrainedModel self.dt_bias = nn.Parameter(torch.ones(self.num_heads)) # S4D real initialization. These are not discretized! # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded A = torch.arange(1, self.num_heads + 1) self.A_log = nn.Parameter(torch.log(A)) self.A_log._no_weight_decay = True self.norm = MambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon) self.D = nn.Parameter(torch.ones(self.num_heads)) self.D._no_weight_decay = True self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias) self.use_bias = config.use_bias if not is_fast_path_available: logger.warning_once( "The fast path is not available because on of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`" " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and" " https://github.com/Dao-AILab/causal-conv1d" ) def cuda_kernels_forward( self, hidden_states: torch.Tensor, cache_params: Optional[Mamba2Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): # 1. 
Gated MLP's linear projection hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask) projected_states = self.in_proj(hidden_states) # Set up dimensions for reshapes later batch_size, seq_len, _ = hidden_states.shape groups_time_state_size = self.n_groups * self.ssm_state_size d_mlp = ( projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads ) // 2 # Single step calculations via cache if cache_params is not None and cache_position is not None and cache_position[0] > 0: _, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split( [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) # 2. Convolution sequence transformation hidden_states_B_C = causal_conv1d_update( hidden_states_B_C, cache_params.conv_states[self.layer_idx], self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation, ) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1, ) # 3. SSM transformation A = -torch.exp(self.A_log.float()) # (nheads,) A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) dt = dt[:, :, None].expand(-1, -1, self.head_dim) dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim) D = self.D[:, None, ...].expand(-1, self.head_dim) B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups) C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups) hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim) hidden_states = selective_state_update( cache_params.ssm_states[self.layer_idx], hidden_states_reshaped, dt, A, B, C, D, z=None, dt_bias=dt_bias, dt_softplus=True, ) hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim) hidden_states = self.norm(hidden_states, gate) # 4. Final linear projection out = self.out_proj(hidden_states)[:, None, ...] # Fused calculations or step by step if no initialized cache is found else: A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size) dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit} # 2-4. Fused kernel for conv1d, SSM, and the final projection if self.training and cache_params is None: out = mamba_split_conv1d_scan_combined( projected_states, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.dt_bias, A, D=self.D, chunk_size=self.chunk_size, seq_idx=None, # was seq_idx activation=self.activation, rmsnorm_weight=self.norm.weight, rmsnorm_eps=self.norm.variance_epsilon, outproj_weight=self.out_proj.weight, outproj_bias=self.out_proj.bias, headdim=self.head_dim, ngroups=self.n_groups, norm_before_gate=False, return_final_states=False, **dt_limit_kwargs, ) else: _, _, gate, hidden_states_B_C, dt = projected_states.split( [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) # 2. 
Convolution sequence transformation # Init cache if cache_params is not None: hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) conv_states = nn.functional.pad( hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0), ) cache_params.update_conv_state( layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True ) if self.activation not in ["silu", "swish"]: hidden_states_B_C = self.act( self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2) ) else: hidden_states_B_C = causal_conv1d_fn( x=hidden_states_B_C.transpose(1, 2), weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation, ).transpose(1, 2) hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1, ) # 3. SSM transformation scan_output, ssm_state = mamba_chunk_scan_combined( hidden_states.view(batch_size, seq_len, -1, self.head_dim), dt, A, B.view(batch_size, seq_len, self.n_groups, -1), C.view(batch_size, seq_len, self.n_groups, -1), chunk_size=self.chunk_size, D=self.D, z=None, seq_idx=None, return_final_states=True, dt_bias=self.dt_bias, dt_softplus=True, **dt_limit_kwargs, ) # Init cache if ssm_state is not None and cache_params is not None: cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state) scan_output = scan_output.view(batch_size, seq_len, -1) # Multiply "gate" branch and apply extra normalization layer scan_output = self.norm(scan_output, gate) # 4. Final linear projection out = self.out_proj(scan_output) return out # fmt: off def torch_forward(self, input_states, cache_params: Optional[Mamba2Cache]=None, cache_position:Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None): batch_size, seq_len, _ = input_states.shape dtype = input_states.dtype # 1. Gated MLP's linear projection input_states = apply_mask_to_padding_states(input_states, attention_mask) projected_states = self.in_proj(input_states) d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size-self.num_heads) // 2 _, _, gate, hidden_states_B_C, dt = projected_states.split( [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) # 2. 
Convolution sequence transformation if cache_params is not None and cache_position is not None and cache_position[0] > 0: cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False) # We need to guarantee that anything regarding the cache is on the same device conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device) hidden_states_B_C = torch.sum( conv_states * self.conv1d.weight.squeeze(1), dim=-1 ) if self.use_conv_bias: hidden_states_B_C = hidden_states_B_C + self.conv1d.bias hidden_states_B_C = self.act(hidden_states_B_C) else: # Init cache if cache_params is not None: hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) conv_states = nn.functional.pad( hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0) ) cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True) hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)) hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1 ) # 3. SSM transformation A = -torch.exp(self.A_log.float()) # [num_heads] if cache_params is not None and cache_position is not None and cache_position[0] > 0: # We need to guarantee that anything regarding the cache is on the same device cache_device = cache_params.ssm_states.device # Note: there is no need to pad parameter matrices here, as there is just one new token # for batched generation dt = dt[:, 0, :][:, None, ...] dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim) # [num_heads] -> [num_heads, head_dim] dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim) dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype)) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) # [bsz, num_heads, head_dim, state_size] dA = (torch.exp(dt[..., None] * A)).to(device=cache_device) # Discretize B # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] -> # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size] B = B.reshape(batch_size, self.n_groups, -1)[..., None, :] B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous() B = B.reshape(batch_size, -1, B.shape[-1]) # [bsz, num_heads, head_dim, state_size] dB = dt[..., None] * B[..., None, :] # Discretize x into dB # [bsz, intermediate_size] -> [bsz, num_heads, head_dim] hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim) dBx = (dB * hidden_states[..., None]).to(device=cache_device) # State calculation cache_params.update_ssm_state( layer_idx=self.layer_idx, new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx ) # Subsequent output # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size] C = C.reshape(batch_size, self.n_groups, -1)[..., None, :] C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous() C = C.reshape(batch_size, -1, C.shape[-1]) # [bsz, num_heads, head_dim] ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n] # Reshape ssm_states to merge the 
first two dimensions ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n] C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1] y = torch.bmm(ssm_states_reshaped, C_reshaped) y = y.view(batch_size, self.num_heads, self.head_dim) # D skip connection # [num_heads] -> [num_heads, head_dim] D = self.D[..., None].expand(self.D.shape[0], self.head_dim) y = (y + hidden_states * D).to(y.dtype) # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size] y = y.reshape(batch_size, -1)[:, None, ...] else: # begin ssd naive implementation without einsums dt = nn.functional.softplus(dt + self.dt_bias) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float() B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() B = B.repeat(1, 1, self.num_heads // self.n_groups, 1) C = C.repeat(1, 1, self.num_heads // self.n_groups, 1) pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size) # Discretize x and A hidden_states = hidden_states * dt[..., None] A = A.to(hidden_states.dtype) * dt # Rearrange into blocks/chunks hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)] # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size] A = A.permute(0, 3, 1, 2) A_cumsum = torch.cumsum(A, dim=-1) # 1. Compute the output for each intra-chunk (diagonal blocks) # This is the analog of a causal mask L = torch.exp(segment_sum(A)) # Contraction of C and B to get G (attention-weights like) G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n) G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h) # Compute M, equivalent to applying attention mask to weights M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None] M = M_intermediate.sum(dim=-1) # Compute Y_diag (apply to values) Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3) # 2. Compute the state for each intra-chunk # (right term of low-rank factorization of off-diagonal blocks; B terms) decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum)) B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None] states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2) # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries # (middle term of factorization of off-diag blocks; A terms) if cache_params is not None and cache_position is not None and cache_position[0] > 0: previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device) else: previous_states = torch.zeros_like(states[:, :1]) states = torch.cat([previous_states, states], dim=1) decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0)))) decay_chunk = decay_chunk.transpose(1, 3) new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1) states, ssm_state = new_states[:, :-1], new_states[:, -1] # 4. 
Compute state -> output conversion per chunk # (left term of low-rank factorization of off-diagonal blocks; C terms) state_decay_out = torch.exp(A_cumsum) C_times_states = (C[..., None, :] * states[:, :, None, ...]) state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1) Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None]) # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks) y = Y_diag + Y_off # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim] y = y.reshape(batch_size, -1, self.num_heads, self.head_dim) y = y + D_residual # Cutting off padded chunks if pad_size > 0: y = y[:, :seq_len, :, :] y = y.reshape(batch_size, seq_len, -1) # Init cache if ssm_state is not None and cache_params is not None: cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state) scan_output = self.norm(y, gate) # end ssd naive # 4. Final linear projection contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size] return contextualized_states # fmt: on def forward( self, hidden_states, cache_params: Optional[Mamba2Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): if is_fast_path_available and "cuda" in self.in_proj.weight.device.type: return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask) dtype = hidden_states.dtype if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66 hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask) class Mamba2RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Mamba2RMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class Mamba2Block(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.config = config self.layer_idx = layer_idx self.residual_in_fp32 = config.residual_in_fp32 self.norm = Mamba2RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.mixer = Mamba2Mixer(config, layer_idx=layer_idx) def forward( self, hidden_states, cache_params: Optional[Mamba2Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): residual = hidden_states hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype)) if self.residual_in_fp32: residual = residual.to(torch.float32) hidden_states = self.mixer( hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask ) hidden_states = residual + hidden_states return hidden_states class Mamba2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = Mamba2Config base_model_prefix = "backbone" _no_split_modules = ["Mamba2Block"] supports_gradient_checkpointing = True _is_stateful = True def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, Mamba2Mixer): module.A_log._no_weight_decay = True module.D._no_weight_decay = True dt = torch.exp( torch.rand(self.config.num_heads) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min) ).clamp(min=self.config.time_step_floor) # # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 inv_dt = dt + torch.log(-torch.expm1(-dt)) with torch.no_grad(): module.dt_bias.copy_(inv_dt) module.dt_bias._no_reinit = True if isinstance(module, nn.Linear): if module.bias is not None: if not getattr(module.bias, "_no_reinit", False): nn.init.zeros_(module.bias) elif isinstance(module, nn.Embedding): nn.init.normal_(module.weight, std=self.config.initializer_range) if self.config.rescale_prenorm_residual: # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py for name, p in module.named_parameters(): if name in ["out_proj.weight"]: # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) # We need to reinit p since this code could be called multiple times # Having just p *= scale would repeatedly scale it down nn.init.kaiming_uniform_(p, a=math.sqrt(5)) with torch.no_grad(): p /= math.sqrt(self.config.num_hidden_layers) @dataclass # Copied from transformers.models.mamba.modeling_mamba.MambaOutput with MAMBA->MAMBA2,Mamba->Mamba2 class Mamba2Output(ModelOutput): """ Class for the MAMBA2 model outputs. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. cache_params (`Mamba2Cache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. Includes both the State space model state matrices after the selective scan, and the Convolutional states hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: Optional[torch.FloatTensor] = None cache_params: Optional[Mamba2Cache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.mamba.modeling_mamba.MambaCausalLMOutput with Mamba->Mamba2 class Mamba2CausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). 
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cache_params (`Mamba2Cache`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. Includes both the State space model state matrices after the selective scan, and the Convolutional states hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None cache_params: Optional[Mamba2Cache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None MAMBA2_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Mamba2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MAMBA2_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): Indices of input sequence tokens in the vocabulary. If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. cache_params (`Mamba2Cache`, *optional*): If passed along, the model uses the previous state in all the blocks (which will give the output for the `input_ids` provided as if the model add `state_input_ids + input_ids` as context). use_cache (`bool`, *optional*): If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*): The position of the current input in the cache. This is used to ensure that the cache is correctly updated. 
If `cache_params` is passed, `cache_position` should also be passed. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) """ @add_start_docstrings( "The bare MAMBA2 Model transformer outputting raw hidden-states without any specific head on top.", MAMBA2_START_DOCSTRING, ) class Mamba2Model(Mamba2PreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([Mamba2Block(config, layer_idx=idx) for idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.norm_f = Mamba2RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) # Initialize weights and apply final processing self._register_load_state_dict_pre_hook(self.load_hook) self.post_init() def load_hook(self, state_dict, prefix, *args): for k in state_dict: if "embedding." in k: state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k) break def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, new_embeddings): self.embeddings = new_embeddings @add_start_docstrings_to_model_forward(MAMBA2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Mamba2Output, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, cache_params: Optional[Mamba2Cache] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> Union[Tuple, Mamba2Output]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) if self.gradient_checkpointing and self.training and use_cache: use_cache = False if use_cache: if cache_params is None: cache_params = Mamba2Cache( self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype ) cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device) elif cache_position is None: # cases when we do manual forward instead of using `model.generate` which will initiate # `cache_position` and makes sure it is not None, throw error here instead of doing some # hack to conjecture the current cache position raise ValueError( "You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, " "you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will " "be initialized for you automatically" ) else: cache_params = None hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None for mixer_block in self.layers: if self.gradient_checkpointing and self.training: hidden_states = 
self._gradient_checkpointing_func( mixer_block.__call__, hidden_states, cache_params, cache_position, attention_mask ) else: hidden_states = mixer_block( hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask, ) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.norm_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None) return Mamba2Output( last_hidden_state=hidden_states, cache_params=cache_params if use_cache else None, hidden_states=all_hidden_states, ) @add_start_docstrings( """ The MAMBA2 Model transformer with a language modeling head on top (linear layer with weights not tied to the input embeddings). """, MAMBA2_START_DOCSTRING, ) class Mamba2ForCausalLM(Mamba2PreTrainedModel, GenerationMixin): _tied_weights_keys = [] def __init__(self, config): super().__init__(config) self.backbone = Mamba2Model(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.backbone.get_input_embeddings() def set_input_embeddings(self, new_embeddings): return self.backbone.set_input_embeddings(new_embeddings) def prepare_inputs_for_generation( self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[Mamba2Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ): # Overwitten -- uses `cache_params` as opposed to `past_key_values` if use_cache: # `cache_position` should have been initialized in `generate` if cache_position is None: raise ValueError( "`cache_position` should not be None as it should have been initialized in " "`model.generate`, you are responsible for passing in a valid `cache_position` if " "you are calling `prepare_inputs_for_generation` directly with `use_cache=True`" ) if cache_position[0] > 0: input_ids = input_ids[:, -1][..., None] if attention_mask is not None: attention_mask = None else: # we initialize the `cache_position` to full size of `conv_states` at prefill stage # considering padding will be applied when input length is shorter, and truncation # will be applied when it is longer, so it will be equivalent to always have it match # the length of `cache_params.conv_states`, which is `config.conv_kernel` cache_position = torch.arange(0, self.config.conv_kernel, device=input_ids.device) if inputs_embeds is not None and cache_params is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "attention_mask": attention_mask, "cache_params": cache_params, "use_cache": use_cache, "cache_position": cache_position, } ) return model_inputs @add_start_docstrings_to_model_forward(MAMBA2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Mamba2CausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, cache_params: Optional[Mamba2Cache] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, use_cache: 
Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, # for now we need this for generation ) -> Union[Tuple, Mamba2CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict mamba2_outputs = self.backbone( input_ids, cache_params=cache_params, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, cache_position=cache_position, attention_mask=attention_mask, ) hidden_states = mamba2_outputs[0] logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float() loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + mamba2_outputs[1:] return ((loss,) + output) if loss is not None else output return Mamba2CausalLMOutput( loss=loss, logits=logits, cache_params=mamba2_outputs.cache_params, hidden_states=mamba2_outputs.hidden_states, ) __all__ = ["Mamba2ForCausalLM", "Mamba2Model", "Mamba2PreTrainedModel"]
transformers/src/transformers/models/mamba2/modeling_mamba2.py/0
{ "file_path": "transformers/src/transformers/models/mamba2/modeling_mamba2.py", "repo_id": "transformers", "token_count": 23515 }
# coding=utf-8 # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MBART model configuration""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging logger = logging.get_logger(__name__) class MBartConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MBART [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. 
max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. Example: ```python >>> from transformers import MBartConfig, MBartModel >>> # Initializing a MBART facebook/mbart-large-cc25 style configuration >>> configuration = MBartConfig() >>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration >>> model = MBartModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mbart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, forced_eos_token_id=forced_eos_token_id, **kwargs, ) # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig with Bart->MBart class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_inputs = 
OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") elif self.task == "causal-lm": # TODO: figure this case out. common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} else: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, decoder_seq_length, is_pair, framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, encoder_seq_length = common_inputs["input_ids"].shape decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_past_length = decoder_seq_length + 3 decoder_shape = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["decoder_attention_mask"] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 ) common_inputs["past_key_values"] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, 
num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(min_num_layers): common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) # TODO: test this. shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 num_encoder_layers, _ = self.num_layers num_encoder_attention_heads, _ = self.num_attention_heads past_shape = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) common_inputs["past_key_values"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers) ] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) elif self.task == "causal-lm": common_inputs = self._generate_dummy_inputs_for_causal_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ["default", "seq2seq-lm"]: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) __all__ = ["MBartConfig", "MBartOnnxConfig"]
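# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way to exercise the
# ONNX config defined above. It wraps a default MBartConfig in MBartOnnxConfig and
# inspects the dynamic axes and dummy inputs used for export. The tokenizer
# checkpoint name below is an assumption; any MBart tokenizer would work.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    cfg = MBartConfig()
    onnx_cfg = MBartOnnxConfig(cfg, task="default")

    # Dynamic axes declared for the exported graph (batch / sequence dimensions).
    print(dict(onnx_cfg.inputs))
    print(dict(onnx_cfg.outputs))

    # Dummy inputs of the kind that would be traced during export.
    tok = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
    dummy = onnx_cfg.generate_dummy_inputs(tok, batch_size=2, seq_length=8, framework="pt")
    print({name: tuple(tensor.shape) for name, tensor in dummy.items()})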
transformers/src/transformers/models/mbart/configuration_mbart.py/0
{ "file_path": "transformers/src/transformers/models/mbart/configuration_mbart.py", "repo_id": "transformers", "token_count": 8060 }
#################################################################################################### # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPT2Config #################################################################################################### def recursive_print(name, val, spaces=0): # Format the message. if name is None: msg = None else: fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}" msg = fmt.format(name) # Print and recurse (if needed). if isinstance(val, dict): if msg is not None: print(msg) for k in val.keys(): recursive_print(k, val[k], spaces + 2) elif isinstance(val, torch.Tensor): print(msg, ":", val.size()) else: print(msg, ":", val) def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. input_shape = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 2) param = param.transpose(1, 2).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 1).contiguous() param = param.view(*input_shape) return param #################################################################################################### def convert_megatron_checkpoint(args, input_state_dict, config): # The converted output model. 
output_state_dict = {} # old versions did not store training args ds_args = input_state_dict.get("args", None) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) config.vocab_size = ds_args.padded_vocab_size config.n_positions = ds_args.max_position_embeddings config.n_embd = ds_args.hidden_size config.n_layer = ds_args.num_layers config.n_head = ds_args.num_attention_heads config.n_inner = ds_args.ffn_hidden_size # pprint(config) # The number of heads. heads = config.n_head # The hidden_size per head. hidden_size_per_head = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): checkpoint_version = input_state_dict["checkpoint_version"] else: checkpoint_version = 0.0 # The model. model = input_state_dict["model"] # The language model. lm = model["language_model"] # The embeddings. embeddings = lm["embedding"] # The word embeddings. word_embeddings = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. word_embeddings = word_embeddings[: config.vocab_size, :] output_state_dict["transformer.wte.weight"] = word_embeddings # The position embeddings. pos_embeddings = embeddings["position_embeddings"]["weight"] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] n_positions = pos_embeddings.size(0) if n_positions != config.n_positions: raise ValueError( f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" ) # Store the position embeddings. output_state_dict["transformer.wpe.weight"] = pos_embeddings # The transformer. transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] # The regex to extract layer names. layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # The simple map of names for "automated" rules. megatron_to_transformers = { "attention.dense": ".attn.c_proj.", "self_attention.dense": ".attn.c_proj.", "mlp.dense_h_to_4h": ".mlp.c_fc.", "mlp.dense_4h_to_h": ".mlp.c_proj.", } # Extract the layers. for key, val in transformer.items(): # Match the name. m = layer_re.match(key) # Stop if that's not a layer if m is None: break # The index of the layer. layer_idx = int(m.group(1)) # The name of the operation. op_name = m.group(2) # Is it a weight or a bias? weight_or_bias = m.group(3) # The name of the layer. layer_name = f"transformer.h.{layer_idx}" # For layernorm(s), simply store the layer norm. if op_name.endswith("layernorm"): ln_name = "ln_1" if op_name.startswith("input") else "ln_2" output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view( 1, 1, n_positions, n_positions ) output_state_dict[layer_name + ".attn.bias"] = causal_mask # Insert a "dummy" tensor for masked_bias. masked_bias = torch.tensor(-1e4, dtype=torch.float16) output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. out_val = out_val.transpose(0, 1).contiguous() # Store. 
output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head) # Store. No change of shape. output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val # Transpose the weights. elif weight_or_bias == "weight": out_name = megatron_to_transformers[op_name] output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1) # Copy the bias. elif weight_or_bias == "bias": out_name = megatron_to_transformers[op_name] output_state_dict[layer_name + out_name + "bias"] = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"] output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"] # For LM head, transformers' wants the matrix to weight embeddings. output_state_dict["lm_head.weight"] = word_embeddings # It should be done! return output_state_dict #################################################################################################### def main(): # Create the argument parser. parser = argparse.ArgumentParser() parser.add_argument("--print-checkpoint-structure", action="store_true") parser.add_argument( "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)", ) parser.add_argument( "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", ) args = parser.parse_args() # Extract the basename. basename = os.path.dirname(args.path_to_checkpoint) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}") if args.path_to_checkpoint.endswith(".zip"): with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint: with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict: input_state_dict = torch.load(pytorch_dict, map_location="cpu") else: input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu") ds_args = input_state_dict.get("args", None) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: activation_function = "gelu_fast" elif ds_args.openai_gelu: activation_function = "gelu_new" else: activation_function = "gelu" else: # in the very early days this used to be "gelu_new" activation_function = "gelu_new" # Spell out all parameters in case the defaults change. config = GPT2Config( vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, ) else: config = GPT2Config.from_json_file(args.config_file) config.architectures = ["GPT2LMHeadModel"] # Convert. print("Converting") output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config) # Print the structure of converted state dict. 
if args.print_checkpoint_structure: recursive_print(None, output_state_dict) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: tokenizer_type = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": tokenizer_model_name = "openai-community/gpt2" elif tokenizer_type == "PretrainedFromHF": tokenizer_model_name = ds_args.tokenizer_name_or_path else: raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}") else: tokenizer_model_name = "openai-community/gpt2" tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name) tokenizer_class = type(tokenizer).__name__ config.tokenizer_class = tokenizer_class # Store the config to file. print("Saving config") config.save_pretrained(basename) # Save tokenizer based on args print(f"Adding {tokenizer_class} tokenizer files") tokenizer.save_pretrained(basename) # Store the state_dict to file. output_checkpoint_file = os.path.join(basename, "pytorch_model.bin") print(f'Saving checkpoint to "{output_checkpoint_file}"') torch.save(output_state_dict, output_checkpoint_file) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
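####################################################################################################
# Illustrative note (not part of the original script): a hand-checkable view of what
# fix_query_key_value_ordering does for checkpoint_version >= 2.0. The sizes below are assumptions
# kept deliberately tiny. A fused QKV parameter stored as (num_heads * num_splits * hidden_size, ...)
# is viewed as (num_heads, num_splits, hidden_size, ...), the first two axes are swapped, and the
# result is flattened back, i.e. the per-head [q, k, v] chunks are regrouped into contiguous
# Q, K and V blocks, matching the Q/K/V block ordering that GPT-2's fused c_attn parameters use.
#
#   >>> import torch
#   >>> bias = torch.arange(18.0)  # num_heads=2, num_splits=3 (q, k, v), hidden_size=3 per head
#   >>> fix_query_key_value_ordering(bias, checkpoint_version=2.0, num_splits=3, num_heads=2, hidden_size=3)
#   # -> [0, 1, 2, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 15, 16, 17]
####################################################################################################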
transformers/src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py/0
{ "file_path": "transformers/src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py", "repo_id": "transformers", "token_count": 5503 }
from typing import Callable, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...cache_utils import Cache, SlidingWindowCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import QuestionAnsweringModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import logging from ..llama.modeling_llama import ( LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaMLP, LlamaModel, apply_rotary_pos_emb, eager_attention_forward, ) from .configuration_mistral import MistralConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "mistralai/Mistral-7B-v0.1" class MistralMLP(LlamaMLP): def __init__(self, config): super().__init__(config) self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) class MistralAttention(LlamaAttention): def __init__(self, config: MistralConfig, layer_idx: int): super().__init__() self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=getattr(self.config, "sliding_window", None), # main diff with Llama **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class MistralDecoderLayer(LlamaDecoderLayer): def __init__(self, config: MistralConfig, layer_idx: int): super().__init__(config, layer_idx) self.self_attn = MistralAttention(config=config, layer_idx=layer_idx) self.mlp = MistralMLP(config) class MistralModel(LlamaModel): def __init__(self, config: MistralConfig): super().__init__(config) self.layers = nn.ModuleList( [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and past_key_values is not None: is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to " " call `tokenizer.padding_side = 'left'` before tokenizing the input. " ) if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if ( self.config._attn_implementation == "sdpa" and not (using_static_cache or using_sliding_window_cache) and not output_attentions ): if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] # SlidingWindowCache or StaticCache if using_sliding_window_cache or using_static_cache: target_length = past_key_values.get_max_cache_shape() # DynamicCache or no cache else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). 
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], config=self.config, past_key_values=past_key_values, ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, config: MistralConfig, past_key_values: Cache, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to place the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`int`): Batch size. config (`MistralConfig`): The model's configuration class. past_key_values (`Cache`): The cache class that is currently being used to generate. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) if config.sliding_window is not None: # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also # the check is needed to verify is current checkpoint was trained with sliding window or not if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length: sliding_attend_mask = torch.arange(target_length, device=device) <= ( cache_position.reshape(-1, 1) - config.sliding_window ) diagonal_attend_mask.bitwise_or_(sliding_attend_mask) causal_mask *= diagonal_attend_mask causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit if attention_mask.shape[-1] > target_length: attention_mask = attention_mask[:, :target_length] mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask class MistralForCausalLM(LlamaForCausalLM): pass class MistralForTokenClassification(LlamaForTokenClassification): pass class MistralForSequenceClassification(LlamaForSequenceClassification): pass class MistralForQuestionAnswering(LlamaForQuestionAnswering): base_model_prefix = "model" def __init__(self, config): super().__init__(config) self.model = MistralModel(config) # diff with Llama: transformer->model del self.transformer def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() loss = None if start_positions is not None and end_positions is not None: loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return QuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/mistral/modular_mistral.py/0
{ "file_path": "transformers/src/transformers/models/mistral/modular_mistral.py", "repo_id": "transformers", "token_count": 6911 }
# Copyright 2024 Answer.AI, LightOn, and contributors, and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from contextlib import nullcontext from typing import Dict, Literal, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...configuration_utils import PretrainedConfig from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, logging, ) from ...utils.import_utils import is_triton_available from ..gemma.modeling_gemma import GemmaRotaryEmbedding, apply_rotary_pos_emb if is_flash_attn_2_available(): from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func from flash_attn.layers.rotary import RotaryEmbedding from flash_attn.ops.triton.rotary import apply_rotary else: RotaryEmbedding = object _CHECKPOINT_FOR_DOC = "answerdotai/ModernBERT-base" _CONFIG_FOR_DOC = "ModernBertConfig" logger = logging.get_logger(__name__) class ModernBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate an ModernBert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ModernBERT-base. e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50368): Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ModernBertModel`] hidden_size (`int`, *optional*, defaults to 768): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 1152): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 22): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder. Will default to `"gelu"` if not specified. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_cutoff_factor (`float`, *optional*, defaults to 2.0): The cutoff factor for the truncated_normal_initializer for initializing all weight matrices. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. norm_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the normalization layers. pad_token_id (`int`, *optional*, defaults to 50283): Padding token id. eos_token_id (`int`, *optional*, defaults to 50282): End of stream token id. bos_token_id (`int`, *optional*, defaults to 50281): Beginning of stream token id. cls_token_id (`int`, *optional*, defaults to 50281): Classification token id. sep_token_id (`int`, *optional*, defaults to 50282): Separation token id. global_rope_theta (`float`, *optional*, defaults to 160000.0): The base period of the global RoPE embeddings. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. global_attn_every_n_layers (`int`, *optional*, defaults to 3): The number of layers between global attention layers. local_attention (`int`, *optional*, defaults to 128): The window size for local attention. local_rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the local RoPE embeddings. embedding_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the embeddings. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the MLP layers. mlp_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the MLP layers. decoder_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the decoder layers. classifier_pooling (`str`, *optional*, defaults to `"cls"`): The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the CLS token doesn't attend to all tokens on long sequences. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the classifier. classifier_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the classifier. classifier_activation (`str`, *optional*, defaults to `"gelu"`): The activation function for the classifier. deterministic_flash_attn (`bool`, *optional*, defaults to `False`): Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic. sparse_prediction (`bool`, *optional*, defaults to `False`): Whether to use sparse prediction for the masked language model instead of returning the full dense logits. sparse_pred_ignore_index (`int`, *optional*, defaults to -100): The index to ignore for the sparse prediction. reference_compile (`bool`, *optional*): Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may be faster in some scenarios. repad_logits_with_grad (`bool`, *optional*, defaults to `False`): When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. 
This only applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient. Examples: ```python >>> from transformers import ModernBertModel, ModernBertConfig >>> # Initializing a ModernBert style configuration >>> configuration = ModernBertConfig() >>> # Initializing a model from the modernbert-base style configuration >>> model = ModernBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "modernbert" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=50368, hidden_size=768, intermediate_size=1152, num_hidden_layers=22, num_attention_heads=12, hidden_activation="gelu", max_position_embeddings=8192, initializer_range=0.02, initializer_cutoff_factor=2.0, norm_eps=1e-5, norm_bias=False, pad_token_id=50283, eos_token_id=50282, bos_token_id=50281, cls_token_id=50281, sep_token_id=50282, global_rope_theta=160000.0, attention_bias=False, attention_dropout=0.0, global_attn_every_n_layers=3, local_attention=128, local_rope_theta=10000.0, embedding_dropout=0.0, mlp_bias=False, mlp_dropout=0.0, decoder_bias=True, classifier_pooling: Literal["cls", "mean"] = "cls", classifier_dropout=0.0, classifier_bias=False, classifier_activation="gelu", deterministic_flash_attn=False, sparse_prediction=False, sparse_pred_ignore_index=-100, reference_compile=None, repad_logits_with_grad=False, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, cls_token_id=cls_token_id, sep_token_id=sep_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.initializer_range = initializer_range self.initializer_cutoff_factor = initializer_cutoff_factor self.norm_eps = norm_eps self.norm_bias = norm_bias self.global_rope_theta = global_rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.hidden_activation = hidden_activation self.global_attn_every_n_layers = global_attn_every_n_layers self.local_attention = local_attention self.local_rope_theta = local_rope_theta self.embedding_dropout = embedding_dropout self.mlp_bias = mlp_bias self.mlp_dropout = mlp_dropout self.decoder_bias = decoder_bias self.classifier_pooling = classifier_pooling self.classifier_dropout = classifier_dropout self.classifier_bias = classifier_bias self.classifier_activation = classifier_activation self.deterministic_flash_attn = deterministic_flash_attn self.sparse_prediction = sparse_prediction self.sparse_pred_ignore_index = sparse_pred_ignore_index self.reference_compile = reference_compile self.repad_logits_with_grad = repad_logits_with_grad if self.classifier_pooling not in ["cls", "mean"]: raise ValueError( f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {self.classifier_pooling}.' ) def _unpad_modernbert_input( inputs: torch.Tensor, attention_mask: torch.Tensor, position_ids: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, Optional[torch.Tensor], Optional[torch.Tensor]]: """ Remove padding from input sequences. Args: inputs: (batch, seqlen, ...) or (batch, seqlen) attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid. 
position_ids: (batch, seqlen), int, position ids labels: (batch, seqlen), int, labels Returns: unpadded_inputs: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask. indices: (total_nnz) cu_seqlens: (batch + 1), the cumulative sequence lengths max_seqlen_in_batch: int unpadded_position_ids: (total_nnz) or None unpadded_labels: (total_nnz) or None """ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = int(seqlens_in_batch.max().item()) cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) if inputs.dim() == 2: unpadded_inputs = inputs.flatten()[indices] else: batch, seqlen, *rest = inputs.shape shape = batch * seqlen unpadded_inputs = inputs.view(shape, *rest)[indices] unpadded_position_ids = position_ids.flatten()[indices] if position_ids is not None else None unpadded_labels = labels.flatten()[indices] if labels is not None else None return unpadded_inputs, indices, cu_seqlens, max_seqlen_in_batch, unpadded_position_ids, unpadded_labels def _pad_modernbert_output( inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int, ) -> torch.Tensor: """ Add padding to sequences. Args: inputs: (total_nnz, ...) or (total_nnz,), where total_nnz = number of tokens selected in attention_mask. indices: (total_nnz) batch: int, batch size seqlen: int, max sequence length Returns: padded_inputs: (batch, seqlen, ...) or (batch, seqlen) """ if inputs.dim() == 1: output = torch.zeros(batch * seqlen, dtype=inputs.dtype, device=inputs.device) output[indices] = inputs padded_inputs = output.view(batch, seqlen) else: _, *rest = inputs.shape output = torch.zeros(batch * seqlen, *rest, dtype=inputs.dtype, device=inputs.device) output[indices] = inputs padded_inputs = output.view(batch, seqlen, *rest) return padded_inputs class ApplyRotaryEmbUnpad(torch.autograd.Function): @staticmethod def forward( ctx, qkv, cos, sin, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, ): # (total_nnz, 3, nheads, headdim) qkv = qkv.contiguous() total_nnz, _three, _nheads, headdim = qkv.shape # We need qkv to be contiguous so that when we reshape to combine (3, nheads) dimensions, # we get the same tensor # qk = rearrange(qkv[:, :2], "b_s t h d -> b_s (t h) d") qk = qkv[:, :2].view(total_nnz, -1, headdim) apply_rotary( qk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, interleaved=False, inplace=True, ) ctx.save_for_backward(cos, sin, cu_seqlens) ctx.max_seqlen = max_seqlen return qkv @staticmethod def backward(ctx, do): cos, sin, cu_seqlens = ctx.saved_tensors do = do.contiguous() total_nnz, _three, _nheads, headdim = do.shape # We need dqkv to be contiguous so that when we reshape to combine (3, nheads) dimensions, # we get the same tensor dqk = do[:, :2].view(total_nnz, -1, headdim) apply_rotary( dqk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=ctx.max_seqlen, interleaved=False, inplace=True, conjugate=True, ) return do, None, None, None, None, None, None def apply_rotary_unpadded( qkv, cos, sin, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, ): """ Arguments: qkv: (total_nnz, 3, nheads, headdim) - input tensor for packed QKV. cos, sin: (seqlen_rotary, rotary_dim / 2) interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of 1st half and 2nd half (GPT-NeoX style). 
inplace: if True, apply rotary embedding in-place. seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount. Most commonly used in inference when we have KV cache. cu_seqlens: (batch + 1,) or None max_seqlen: int Return: out: (total_nnz, dim) rotary_dim must be <= headdim Apply rotary embedding to the first rotary_dim of x. """ return ApplyRotaryEmbUnpad.apply(qkv, cos, sin, cu_seqlens, max_seqlen) class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding): """ The rotary position embeddings applied directly to unpadded sequences. """ def __init__( self, dim: int, base: float = 10000.0, max_seqlen: Optional[int] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): """ max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ, the cos_sin_cache wll be recomputed during the forward pass. """ super().__init__(dim=dim, base=base, pos_idx_in_fp32=True, device=device, interleaved=False) self.max_seqlen = max_seqlen if max_seqlen is not None and device is not None and dtype is not None: self._update_cos_sin_cache(max_seqlen, device=device, dtype=dtype) def forward( self, qkv: torch.Tensor, cu_seqlens: torch.Tensor, max_seqlen: Optional[int] = None, ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ Apply rotary embedding *inplace* to qkv. qkv: (total_nnz, 3, nheads, headdim) cu_seqlens: (batch + 1,) cumulative sequence lengths max_seqlen: int max seq length in the batch """ if max_seqlen is not None: self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype) qkv = apply_rotary_unpadded( qkv, self._cos_cached, self._sin_cached, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, ) return qkv def extra_repr(self) -> str: return f"dim={self.dim}, base={self.base}, scale_base={self.scale_base}" class ModernBertEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.drop = nn.Dropout(config.embedding_dropout) @torch.compile(dynamic=True) def compiled_embeddings(self, input_ids: torch.LongTensor) -> torch.Tensor: return self.drop(self.norm(self.tok_embeddings(input_ids))) def forward( self, input_ids: torch.LongTensor = None, inputs_embeds: Optional[torch.Tensor] = None ) -> torch.Tensor: if inputs_embeds is not None: hidden_states = self.drop(self.norm(inputs_embeds)) else: hidden_states = ( self.compiled_embeddings(input_ids) if self.config.reference_compile else self.drop(self.norm(self.tok_embeddings(input_ids))) ) return hidden_states class ModernBertMLP(nn.Module): """Applies the GLU at the end of each ModernBERT layer. Compared to the default BERT architecture, this block replaces :class:`~transformers.model.bert.modeling_bert.BertIntermediate` and :class:`~transformers.model.bert.modeling_bert.SelfOutput` with a single module that has similar functionality. 
""" def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.Wi = nn.Linear(config.hidden_size, int(config.intermediate_size) * 2, bias=config.mlp_bias) self.act = ACT2FN[config.hidden_activation] self.drop = nn.Dropout(config.mlp_dropout) self.Wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.mlp_bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: input, gate = self.Wi(hidden_states).chunk(2, dim=-1) return self.Wo(self.drop(self.act(input) * gate)) class ModernBertRotaryEmbedding(GemmaRotaryEmbedding): def __init__(self, config: ModernBertConfig, dim: int, base: float, device: Optional[torch.device] = None): super().__init__(self, config=config, device=device) inv_freq, self.attention_scaling = self.rope_init_fn(None, device, dim=dim, base=base) def eager_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: Tuple[int, int], bs: int, dim: int, output_attentions: Optional[bool] = False, **_kwargs, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = module.rotary_emb(qkv, position_ids=position_ids) query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) scale = module.head_dim**-0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scale if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=module.attention_dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bs, -1, dim) if output_attentions: return (attn_output, attn_weights) return (attn_output,) def flash_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, rotary_emb: ModernBertUnpaddedRotaryEmbedding, cu_seqlens: torch.Tensor, max_seqlen: int, local_attention: Tuple[int, int], bs: int, dim: int, target_dtype: torch.dtype = torch.bfloat16, **_kwargs, ) -> Tuple[torch.Tensor]: # (total_seqlen, 3, nheads, headdim) qkv = rotary_emb(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen) convert_dtype = qkv.dtype not in (torch.float16, torch.bfloat16) if convert_dtype: # FA2 implementation only supports fp16 and bf16. If FA2 is supported, # bfloat16 must be supported as of FA2 2.5.7. 
(Turing GPUs not supported) orig_dtype = qkv.dtype qkv = qkv.to(target_dtype) attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) attn = attn.to(orig_dtype) # type: ignore else: attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) return (attn.view(bs, dim),) def sdpa_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: Tuple[int, int], bs: int, dim: int, **_kwargs, ) -> Tuple[torch.Tensor]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = module.rotary_emb(qkv, position_ids=position_ids) query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_output = ( F.scaled_dot_product_attention( query, key, value, dropout_p=module.attention_dropout if module.training else 0.0, attn_mask=attention_mask, ) .transpose(1, 2) .contiguous() ) attn_output = attn_output.view(bs, -1, dim) return (attn_output,) MODERNBERT_ATTENTION_FUNCTION = { "flash_attention_2": flash_attention_forward, "eager": eager_attention_forward, "sdpa": sdpa_attention_forward, } class ModernBertAttention(nn.Module): """Performs multi-headed self attention on a batch of unpadded sequences. If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput. If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel, which requires padding and unpadding inputs, adding some overhead. See `forward` method for additional details. 
""" def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None): super().__init__() self.config = config self.layer_id = layer_id if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})" ) self.attention_dropout = config.attention_dropout self.deterministic_flash_attn = config.deterministic_flash_attn self.num_heads = config.num_attention_heads self.head_dim = config.hidden_size // config.num_attention_heads self.all_head_size = self.head_dim * self.num_heads self.Wqkv = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=config.attention_bias) if layer_id % config.global_attn_every_n_layers != 0: self.local_attention = (config.local_attention // 2, config.local_attention // 2) else: self.local_attention = (-1, -1) rope_theta = config.global_rope_theta max_position_embeddings = config.max_position_embeddings if self.local_attention != (-1, -1): if config.local_rope_theta is not None: rope_theta = config.local_rope_theta max_position_embeddings = config.local_attention if config._attn_implementation == "flash_attention_2": self.rotary_emb = ModernBertUnpaddedRotaryEmbedding( dim=self.head_dim, max_seqlen=max_position_embeddings, base=rope_theta ) else: self.rotary_emb = ModernBertRotaryEmbedding(config=config, dim=self.head_dim, base=rope_theta) self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias) self.out_drop = nn.Dropout(config.attention_dropout) if config.attention_dropout > 0.0 else nn.Identity() self.pruned_heads = set() def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, **kwargs, ) -> torch.Tensor: qkv = self.Wqkv(hidden_states) bs = hidden_states.shape[0] if self.config._attn_implementation == "flash_attention_2": qkv = qkv.view(-1, 3, self.num_heads, self.head_dim) else: qkv = qkv.view(bs, -1, 3, self.num_heads, self.head_dim) attn_outputs = MODERNBERT_ATTENTION_FUNCTION[self.config._attn_implementation]( self, qkv=qkv, rotary_emb=self.rotary_emb, local_attention=self.local_attention, bs=bs, dim=self.all_head_size, output_attentions=output_attentions, **kwargs, ) hidden_states = attn_outputs[0] hidden_states = self.out_drop(self.Wo(hidden_states)) return (hidden_states,) + attn_outputs[1:] # add attentions if outputted class ModernBertEncoderLayer(nn.Module): def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None): super().__init__() self.config = config if layer_id == 0: self.attn_norm = nn.Identity() else: self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.attn = ModernBertAttention(config=config, layer_id=layer_id) self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.mlp = ModernBertMLP(config) @torch.compile(dynamic=True) def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor: return self.mlp(self.mlp_norm(hidden_states)) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, output_attentions: Optional[bool] = False, ) -> torch.Tensor: attn_outputs = self.attn( self.attn_norm(hidden_states), attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, 
cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions, ) hidden_states = hidden_states + attn_outputs[0] mlp_output = ( self.compiled_mlp(hidden_states) if self.config.reference_compile else self.mlp(self.mlp_norm(hidden_states)) ) hidden_states = hidden_states + mlp_output return (hidden_states,) + attn_outputs[1:] # add attentions if outputted MODERNBERT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ModernBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare ModernBert Model outputting raw hidden-states without any specific head on top.", MODERNBERT_START_DOCSTRING, ) class ModernBertPreTrainedModel(PreTrainedModel): config_class = ModernBertConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["ModernBertEmbeddings", "ModernBertEncoderLayer"] _supports_flash_attn_2 = True _supports_sdpa = True _supports_flex_attn = False def _init_weights(self, module: nn.Module): cutoff_factor = self.config.initializer_cutoff_factor if cutoff_factor is None: cutoff_factor = 3 def init_weight(module: nn.Module, std: float): nn.init.trunc_normal_( module.weight, mean=0.0, std=std, a=-cutoff_factor * std, b=cutoff_factor * std, ) if isinstance(module, nn.Linear): if module.bias is not None: nn.init.zeros_(module.bias) stds = { "in": self.config.initializer_range, "out": self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers), "embedding": self.config.initializer_range, "final_out": self.config.hidden_size**-0.5, } if isinstance(module, ModernBertEmbeddings): init_weight(module.tok_embeddings, stds["embedding"]) elif isinstance(module, ModernBertMLP): init_weight(module.Wi, stds["in"]) init_weight(module.Wo, stds["out"]) elif isinstance(module, ModernBertAttention): init_weight(module.Wqkv, stds["in"]) init_weight(module.Wo, stds["out"]) elif isinstance(module, ModernBertPredictionHead): init_weight(module.dense, stds["out"]) elif isinstance(module, ModernBertForMaskedLM): init_weight(module.decoder, stds["out"]) elif isinstance(module, (ModernBertForSequenceClassification, ModernBertForTokenClassification)): init_weight(module.classifier, stds["final_out"]) @classmethod def _autoset_attn_implementation( cls, config, use_flash_attention_2: bool = False, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, check_device_map: bool = True, ): # If the user didn't specify anything, try to use flash_attention_2 if available. # Otherwise we fall back to the default SDPA -> Eager from the super() method. # ModernBert's FA2 implementation correctly handles non-fp16/bf16 dtypes, we don't # need the FA2 warning for non-fp16/bf16 dtypes so we set fp16 for the FA2 check. 
if config._attn_implementation_internal is None: config._attn_implementation_internal = "flash_attention_2" try: return cls._check_and_enable_flash_attn_2( config, torch_dtype=torch.float16, device_map=device_map, hard_check_only=False, check_device_map=check_device_map, ) except (ValueError, ImportError): config._attn_implementation_internal = None return super()._autoset_attn_implementation( config, use_flash_attention_2=use_flash_attention_2, torch_dtype=torch.float16, device_map=device_map, check_device_map=check_device_map, ) def _maybe_set_compile(self): if self.config.reference_compile is False: return if hasattr(self, "hf_device_map") and len(self.hf_device_map) > 1: if self.config.reference_compile: logger.warning_once( "If `accelerate` split the model across devices, `torch.compile` will not work. " "Falling back to non-compiled mode." ) self.config.reference_compile = False if self.device.type == "mps": if self.config.reference_compile: logger.warning_once( "Compiling the model with `torch.compile` and using a `torch.mps` device is not supported. " "Falling back to non-compiled mode." ) self.config.reference_compile = False if self.device.type == "cpu": if self.config.reference_compile: logger.warning_once( "Compiling the model with `torch.compile` and using a `torch.cpu` device is not supported. " "Falling back to non-compiled mode." ) self.config.reference_compile = False if self.config.reference_compile is None: self.config.reference_compile = is_triton_available() def resize_token_embeddings(self, *args, **kwargs): model_embeds = super().resize_token_embeddings(*args, **kwargs) if self.config.reference_compile in {True, None}: if self.config.reference_compile: logger.warning_once( "Resizing token embeddings with `torch.compile` is not supported. Falling back to non-compiled mode." ) self.config.reference_compile = False return model_embeds MODERNBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. With Flash Attention 2.0, padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. 
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare ModernBert Model outputting raw hidden-states without any specific head on top.", MODERNBERT_START_DOCSTRING, ) class ModernBertModel(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.embeddings = ModernBertEmbeddings(config) self.layers = nn.ModuleList( [ModernBertEncoderLayer(config, layer_id) for layer_id in range(config.num_hidden_layers)] ) self.final_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embeddings.tok_embeddings def set_input_embeddings(self, value): self.embeddings.tok_embeddings = value @add_start_docstrings_to_model_forward(MODERNBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) 
return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None self._maybe_set_compile() if input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) if batch_size is None and seq_len is None: if inputs_embeds is not None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) repad = False if self.config._attn_implementation == "flash_attention_2": if indices is None and cu_seqlens is None and max_seqlen is None: repad = True if inputs_embeds is None: with torch.no_grad(): input_ids, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input( inputs=input_ids, attention_mask=attention_mask ) else: inputs_embeds, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input( inputs=inputs_embeds, attention_mask=attention_mask ) else: if position_ids is None: position_ids = torch.arange(seq_len, device=device).unsqueeze(0) attention_mask, sliding_window_mask = self._update_attention_mask( attention_mask, output_attentions=output_attentions ) hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds) for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, sliding_window_mask, position_ids, cu_seqlens, max_seqlen, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions and len(layer_outputs) > 1: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.final_norm(hidden_states) if repad: hidden_states = _pad_modernbert_output( inputs=hidden_states, indices=indices, batch=batch_size, seqlen=seq_len ) if all_hidden_states is not None: all_hidden_states = tuple( _pad_modernbert_output(inputs=hs, indices=indices, batch=batch_size, seqlen=seq_len) for hs in all_hidden_states ) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def _update_attention_mask(self, attention_mask: torch.Tensor, output_attentions: bool) -> torch.Tensor: if output_attentions: if self.config._attn_implementation == "sdpa": logger.warning_once( "Outputting attentions is only supported with the 'eager' attention implementation, " 'not with "sdpa". Falling back to `attn_implementation="eager"`.' 
) self.config._attn_implementation = "eager" elif self.config._attn_implementation != "eager": logger.warning_once( "Outputting attentions is only supported with the eager attention implementation, " f'not with {self.config._attn_implementation}. Consider setting `attn_implementation="eager"`.' " Setting `output_attentions=False`." ) global_attention_mask = _prepare_4d_attention_mask(attention_mask, self.dtype) # Create position indices rows = torch.arange(global_attention_mask.shape[2]).unsqueeze(0) # Calculate distance between positions distance = torch.abs(rows - rows.T) # Create sliding window mask (1 for positions within window, 0 outside) window_mask = ( (distance <= self.config.local_attention // 2).unsqueeze(0).unsqueeze(0).to(attention_mask.device) ) # Combine with existing mask sliding_window_mask = global_attention_mask.masked_fill(window_mask.logical_not(), torch.finfo(self.dtype).min) return global_attention_mask, sliding_window_mask class ModernBertPredictionHead(nn.Module): def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias) self.act = ACT2FN[config.classifier_activation] self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return self.norm(self.act(self.dense(hidden_states))) @add_start_docstrings( "The ModernBert Model with a decoder head on top that is used for masked language modeling.", MODERNBERT_START_DOCSTRING, ) class ModernBertForMaskedLM(ModernBertPreTrainedModel): _tied_weights_keys = ["decoder.weight"] def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias) self.sparse_prediction = self.config.sparse_prediction self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, new_embeddings: nn.Linear): self.decoder = new_embeddings @torch.compile(dynamic=True) def compiled_head(self, output: torch.Tensor) -> torch.Tensor: return self.decoder(self.head(output)) @add_start_docstrings_to_model_forward(MODERNBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() if self.config._attn_implementation == "flash_attention_2": if indices is None and cu_seqlens is None and max_seqlen is None: if batch_size is None and seq_len is None: if inputs_embeds is not 
None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) if inputs_embeds is None: with torch.no_grad(): input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input( inputs=input_ids, attention_mask=attention_mask, position_ids=position_ids, labels=labels ) else: inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input( inputs=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, labels=labels ) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.sparse_prediction and labels is not None: # flatten labels and output first labels = labels.view(-1) last_hidden_state = last_hidden_state.view(labels.shape[0], -1) # then filter out the non-masked tokens mask_tokens = labels != self.sparse_pred_ignore_index last_hidden_state = last_hidden_state[mask_tokens] labels = labels[mask_tokens] logits = ( self.compiled_head(last_hidden_state) if self.config.reference_compile else self.decoder(self.head(last_hidden_state)) ) loss = None if labels is not None: loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size) if self.config._attn_implementation == "flash_attention_2": with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad(): logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len) if not return_dict: output = (logits,) return ((loss,) + output) if loss is not None else output return MaskedLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( "The ModernBert Model with a sequence classification head on top that performs pooling.", MODERNBERT_START_DOCSTRING, ) class ModernBertForSequenceClassification(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MODERNBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: 
Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.config.classifier_pooling == "cls": last_hidden_state = last_hidden_state[:, 0] elif self.config.classifier_pooling == "mean": last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum( dim=1, keepdim=True ) pooled_output = self.head(last_hidden_state) pooled_output = self.drop(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( "The ModernBert Model with a token classification head on top, e.g. 
for Named Entity Recognition (NER) tasks.", MODERNBERT_START_DOCSTRING, ) class ModernBertForTokenClassification(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MODERNBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.head(last_hidden_state) last_hidden_state = self.drop(last_hidden_state) logits = self.classifier(last_hidden_state) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "ModernBertConfig", "ModernBertModel", "ModernBertPreTrainedModel", "ModernBertForMaskedLM", "ModernBertForSequenceClassification", "ModernBertForTokenClassification", ]
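# Illustrative usage sketch (commented out; not part of the module): a minimal
# fill-mask forward pass with the classes defined above. The checkpoint name is
# a placeholder for any ModernBERT checkpoint that provides a [MASK] token.
#
#   from transformers import AutoTokenizer, ModernBertForMaskedLM
#
#   tokenizer = AutoTokenizer.from_pretrained("<modernbert-checkpoint>")
#   model = ModernBertForMaskedLM.from_pretrained("<modernbert-checkpoint>")
#   inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")
#   logits = model(**inputs).logits
#   mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
#   print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))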
transformers/src/transformers/models/modernbert/modular_modernbert.py/0
{ "file_path": "transformers/src/transformers/models/modernbert/modular_modernbert.py", "repo_id": "transformers", "token_count": 27797 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MusicGen checkpoints from the original repository.""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, T5EncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"] def rename_keys(name): if "emb" in name: name = name.replace("emb", "model.decoder.embed_tokens") if "transformer" in name: name = name.replace("transformer", "model.decoder") if "cross_attention" in name: name = name.replace("cross_attention", "encoder_attn") if "linear1" in name: name = name.replace("linear1", "fc1") if "linear2" in name: name = name.replace("linear2", "fc2") if "norm1" in name: name = name.replace("norm1", "self_attn_layer_norm") if "norm_cross" in name: name = name.replace("norm_cross", "encoder_attn_layer_norm") if "norm2" in name: name = name.replace("norm2", "final_layer_norm") if "out_norm" in name: name = name.replace("out_norm", "model.decoder.layer_norm") if "linears" in name: name = name.replace("linears", "lm_heads") if "condition_provider.conditioners.description.output_proj" in name: name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj") return name def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]: """Function that takes the fairseq Musicgen state dict and renames it according to the HF module names. 
It further partitions the state dict into the decoder (LM) state dict, and that for the encoder-decoder projection.""" keys = list(state_dict.keys()) enc_dec_proj_state_dict = {} for key in keys: val = state_dict.pop(key) key = rename_keys(key) if "in_proj_weight" in key: # split fused qkv proj state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :] state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :] state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val else: state_dict[key] = val return state_dict, enc_dec_proj_state_dict def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig: if checkpoint.endswith("small"): # default config values hidden_size = 1024 num_hidden_layers = 24 num_attention_heads = 16 elif checkpoint.endswith("medium"): hidden_size = 1536 num_hidden_layers = 48 num_attention_heads = 24 elif checkpoint.endswith("large"): hidden_size = 2048 num_hidden_layers = 48 num_attention_heads = 32 else: raise ValueError( "Checkpoint should be one of `['small', 'medium', 'large']` for the mono checkpoints, " "`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` " f"for the stereo checkpoints, or a custom checkpoint with the checkpoint size as a suffix, got {checkpoint}." ) if "stereo" in checkpoint: audio_channels = 2 num_codebooks = 8 else: audio_channels = 1 num_codebooks = 4 config = MusicgenDecoderConfig( hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_codebooks=num_codebooks, audio_channels=audio_channels, ) return config @torch.no_grad() def convert_musicgen_checkpoint( checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", safe_serialization=False ): fairseq_model = MusicGen.get_pretrained(checkpoint, device=device) decoder_config = decoder_config_from_checkpoint(checkpoint) decoder_state_dict = fairseq_model.lm.state_dict() decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict( decoder_state_dict, hidden_size=decoder_config.hidden_size ) text_encoder = T5EncoderModel.from_pretrained("google-t5/t5-base") audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz") decoder = MusicgenForCausalLM(decoder_config).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(key) if len(missing_keys) > 0: raise ValueError(f"Missing key(s) in state_dict: {missing_keys}") if len(unexpected_keys) > 0: raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}") # init the composite model model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict) # check we can do a forward pass input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1) decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1) with torch.no_grad(): logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits if 
logits.shape != (2 * decoder_config.num_codebooks, 1, 2048): raise ValueError("Incorrect shape for logits") # now construct the processor tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") feature_extractor = AutoFeatureExtractor.from_pretrained( "facebook/encodec_32khz", padding_side="left", feature_size=decoder_config.audio_channels ) processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) # set the appropriate bos/pad token ids model.generation_config.decoder_start_token_id = 2048 model.generation_config.pad_token_id = 2048 # set other default generation config params model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate) model.generation_config.do_sample = True model.generation_config.guidance_scale = 3.0 if pytorch_dump_folder is not None: Path(pytorch_dump_folder).mkdir(exist_ok=True) logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}") model.save_pretrained(pytorch_dump_folder, safe_serialization=safe_serialization) processor.save_pretrained(pytorch_dump_folder) if repo_id: logger.info(f"Pushing model {checkpoint} to {repo_id}") model.push_to_hub(repo_id, safe_serialization=safe_serialization) processor.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: " "`['small', 'medium', 'large']` for the mono checkpoints, " "`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` " "for the stereo checkpoints, or a custom checkpoint with the checkpoint size as a suffix.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) parser.add_argument( "--safe_serialization", action="store_true", help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).", ) args = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device, args.safe_serialization)
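# Example invocation (illustrative; the output folder is a placeholder path):
#
#   python src/transformers/models/musicgen/convert_musicgen_transformers.py \
#       --checkpoint small \
#       --pytorch_dump_folder /tmp/musicgen-small-hf \
#       --device cpu \
#       --safe_serialization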
transformers/src/transformers/models/musicgen/convert_musicgen_transformers.py/0
{ "file_path": "transformers/src/transformers/models/musicgen/convert_musicgen_transformers.py", "repo_id": "transformers", "token_count": 3644 }
# coding=utf-8 # Copyright 2024 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for model MyT5.""" import json import os import warnings from collections import defaultdict from typing import Dict, List, Optional, Tuple, Union from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "byte_maps.json"} class ByteRewriter: """ Byte rewriter class for MyT5 tokenizer. This class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules. Args: rewriting_rules (`str` or `Dict[str, str]`): A path to a json file containing the rewriting rules or a dictionary containing the rewriting rules. """ LEAF = "[LEAF]" def __init__(self, rewriting_rules: Union[str, Dict[str, str]]): if isinstance(rewriting_rules, str): with open(rewriting_rules, "r") as f: rewriting_rules = json.load(f) elif not isinstance(rewriting_rules, dict): raise ValueError( f"rewriting_rules should be either a path to json file or a dict, got {type(rewriting_rules)}" ) self.hash_tree = self.construct_hash_tree(rewriting_rules) reverse_rewriting_rules = {v: k for k, v in rewriting_rules.items()} self.reverse_hash_tree = self.construct_hash_tree(reverse_rewriting_rules) def add_leaf(self, hash_tree: Dict[str, Union[dict, List[str]]], byte_in_sequence: str, byte_out_sequence: str): """ Add a leaf with the output byte sequence to the hash tree. """ byte_in_list = byte_in_sequence.split(" ") byte_out_list = byte_out_sequence.split(" ") tree_pointer = hash_tree for b in byte_in_list: if b not in tree_pointer: tree_pointer[b] = {} tree_pointer = tree_pointer[b] tree_pointer[self.LEAF] = byte_out_list def construct_hash_tree(self, rewriting_rules: Dict[str, str]) -> Dict[str, Union[dict, List[str]]]: """ Construct a hash tree for rewritten byte sequences. """ hash_tree = defaultdict(dict) for b in (f"{x:02x}" for x in range(256)): hash_tree[b][self.LEAF] = [b] for in_sequence, out_sequence in rewriting_rules.items(): self.add_leaf(hash_tree, in_sequence, out_sequence) return hash_tree def search_hash_tree(self, byte_sequence: List[str]) -> Union[None, List[str]]: """ Search the hash tree and return the rewritten byte sequence if found. """ tree_pointer = self.hash_tree for b in byte_sequence: if b in tree_pointer: tree_pointer = tree_pointer[b] else: return None return tree_pointer[self.LEAF] def rewrite_bytes(self, in_bytes: List[str], reverse=False) -> List[str]: """ Rewrite a sequence of bytes using the hash tree. Args: in_bytes (`List[str]`): A list of bytes to be rewritten. reverse (`bool`): If True, decoding is performed with the reverse hash tree. Returns: `List[str]`: The rewritten byte sequence. 
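        Example (an illustrative sketch with a single hypothetical rewriting rule; the real MyT5
        rules are loaded from `byte_maps.json`):

            rewriter = ByteRewriter({"61 62": "c0"})
            rewriter.rewrite_bytes(["61", "62", "63"])          # -> ["c0", "63"]
            rewriter.rewrite_bytes(["c0", "63"], reverse=True)  # -> ["61", "62", "63"]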
""" out_bytes = [] b_start = 0 b_end = 0 while b_start < len(in_bytes): tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree for j in range(b_start, len(in_bytes)): b = in_bytes[j] if b in tree_pointer: tree_pointer = tree_pointer[b] elif j == b_start: cur_leaf = [b] b_end = j break else: break if self.LEAF in tree_pointer: cur_leaf = tree_pointer[self.LEAF] b_end = j out_bytes.extend(cur_leaf) b_start = b_end + 1 return out_bytes class MyT5Tokenizer(PreTrainedTokenizer): """ Construct a MyT5 tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): The file containing the byte rewriting rules. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. extra_ids (`int`, *optional*, defaults to 125): Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary like in ByT5 preprocessing see [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)). additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. """ model_input_names = ["input_ids", "attention_mask"] vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ) -> None: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)] elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0: # Check that we have the right number of extra_id special tokens extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens))) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to MyT5Tokenizer. 
In this case the additional_special_tokens must include the" " extra_ids tokens" ) pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token # unk token needs to be in the vocab with correct index self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token} self.offset = len(self._added_tokens_decoder) self._utf_vocab_size = 2**8 # utf is 8 bits # Load byte maps self.byte_maps = json.load(open(vocab_file, "r")) self.decompose_rewriter = ByteRewriter(self.byte_maps["decompose_map"]) self.merge_rewriter = ByteRewriter(self.byte_maps["merge_map"]) super().__init__( eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=0, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def vocab_size(self): return self._utf_vocab_size # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_vocab def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)} vocab.update(self.added_tokens_encoder) return vocab # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # normal case: some special tokens if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]: """Do not add eos again if user already added it.""" if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added." ) return token_ids else: return token_ids + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
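        Example (illustrative; `tokenizer` is an instantiated `MyT5Tokenizer`, whose `eos_token_id` is 1):

            tokenizer.create_token_type_ids_from_sequences([10, 11], [12])  # -> [0, 0, 0, 0, 0]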
""" eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 def _tokenize(self, text: str, **kwargs) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words. Represents tokens in two character hex format""" tokens = [f"{i:02x}" for i in text.encode("utf-8")] tokens = self.morphological_encode(tokens) return tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if len(token) != 2: token_id = None else: token_id = int(token, 16) + self.offset return token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = f"{index - self.offset:02x}" return token def morphological_encode(self, indices: List[str]) -> List[str]: # Decompose and merge morphological sequences indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=False) indices = self.merge_rewriter.rewrite_bytes(indices, reverse=False) return indices def morphological_decode(self, indices: List[str]) -> List[str]: # Demerge and compose morphological sequences indices = self.merge_rewriter.rewrite_bytes(indices, reverse=True) indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=True) return indices def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" bstring = b"" out_tokens = [] for token in tokens: if token in self.added_tokens_decoder: out_tokens.append(self.added_tokens_decoder[token]) elif token in self.added_tokens_encoder: out_tokens.append(token) else: out_tokens.append(token) out_tokens = self.morphological_decode(out_tokens) _added_tokens = set(self.added_tokens_decoder.values()) | set(self.added_tokens_encoder) for token in out_tokens: if token in _added_tokens: bstring += bytes(token, "utf-8") else: bstring += bytes.fromhex(token) string = bstring.decode("utf-8", errors="ignore") return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: writer.write(json.dumps(self.byte_maps, indent=2, ensure_ascii=False)) return (vocab_file,) __all__ = ["MyT5Tokenizer"]
transformers/src/transformers/models/myt5/tokenization_myt5.py/0
{ "file_path": "transformers/src/transformers/models/myt5/tokenization_myt5.py", "repo_id": "transformers", "token_count": 6831 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fast tokenizer class for Nougat. """ import re from functools import partial from multiprocessing import Pool from typing import List, Union import numpy as np from transformers.tokenization_utils_base import INIT_TOKENIZER_DOCSTRING from transformers.tokenization_utils_fast import PreTrainedTokenizerFast from transformers.utils import add_end_docstrings from ...utils import is_levenshtein_available, is_nltk_available, logging, requires_backends if is_levenshtein_available(): from Levenshtein import ratio if is_nltk_available(): import nltk logger = logging.get_logger(__name__) INIT_TOKENIZER_DOCSTRING += """ tokenizer_object ([`tokenizers.Tokenizer`]): A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗 tokenizers](../fast_tokenizers) for more information. tokenizer_file ([`str`]): A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗 tokenizers. """ VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"} def markdown_compatible(text: str) -> str: """ Make text compatible with Markdown formatting. This function makes various text formatting adjustments to make it compatible with Markdown. Args: text (`str`): The input text to be made Markdown-compatible. Returns: `str`: The Markdown-compatible text. """ # equation tag # Replace lines that start with a pattern like (decimal) \[some text\] with \[[some text] \tag{decimal}\]. text = re.sub(r"^\(([\d.]+[a-zA-Z]?)\) \\\[(.+?)\\\]$", r"\[\2 \\tag{\1}\]", text, flags=re.M) # Replace lines that start with a pattern like \[some text\] (decimal) with \[[some text] \tag{decimal}\]. text = re.sub(r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\)$", r"\[\1 \\tag{\2}\]", text, flags=re.M) # Replace lines that start with a pattern like \[some text\] (digits) \[another text\] with \[[some text] \tag{digits}\] [another text]. text = re.sub( r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\) (\\\[.+?\\\])$", r"\[\1 \\tag{\2}\] \3", text, flags=re.M, ) # multi line text = text.replace(r"\. ", ". ") # bold formatting text = text.replace(r"\bm{", r"\mathbf{").replace(r"{\\bm ", r"\mathbf{") text = re.sub(r"\\mbox{ ?\\boldmath\$(.*?)\$}", r"\\mathbf{\1}", text) # Reformat urls (http, ftp and https only) to markdown [url](url) clickable format text = re.sub( r"((?:http|ftp|https):\/\/(?:[\w_-]+(?:(?:\.[\w_-]+)+))(?:[\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-]))", r"[\1](\1)", text, ) # algorithms text = re.sub(r"```\s*(.+?)\s*```", r"```\n\1\n```", text, flags=re.S) return text def normalize_list_like_lines(generation): """ Normalize lines in the given text that resemble list items. The function looks for lines that start optionally with '-' or '*', possibly followed by Roman numerals or digits indicating nesting levels. The function reformats such lines to make them more structured. Args: generation (str): The input text containing lines that need to be normalized. 
Returns: str: The input text with the list-like lines normalized. Note: The function uses regular expressions to identify and reformat the list-like lines. The patterns capture optional bullet points, nesting levels indicated by numerals, and the actual list item content. The normalization adjusts the bullet point style and nesting levels based on the captured patterns. """ # This matches lines starting with - or *, not followed by - or * (lists) # that are then numbered by digits \d or roman numerals (one or more) # and then, optional additional numbering of this line is captured # this is then fed to re.finditer. pattern = r"(?:^)(-|\*)?(?!-|\*) ?((?:\d|[ixv])+ )?.+? (-|\*) (((?:\d|[ixv])+)\.(\d|[ixv]) )?.*(?:$)" for match in reversed(list(re.finditer(pattern, generation, flags=re.I | re.M))): start, stop = match.span() delim = match.group(3) + " " splits = match.group(0).split(delim) replacement = "" if match.group(1) is not None: splits = splits[1:] delim1 = match.group(1) + " " else: delim1 = "" continue # Skip false positives pre, post = generation[:start], generation[stop:] for i, item in enumerate(splits): level = 0 potential_numeral, _, rest = item.strip().partition(" ") if not rest: continue # Infer current nesting level based on detected numbering if re.match(r"^[\dixv]+((?:\.[\dixv])?)+$", potential_numeral, flags=re.I | re.M): level = potential_numeral.count(".") replacement += ( ("\n" if i > 0 else "") + ("\t" * level) + (delim if i > 0 or start == 0 else delim1) + item.strip() ) if post == "": post = "\n" generation = pre + replacement + post return generation def find_next_punctuation(text: str, start_idx=0): """ Find the index of the next punctuation mark. Args: text (`str`): String to examine start_idx (`int`, *optional*) Index where to start """ for i in range(start_idx, len(text)): if text[i] in [".", "?", "!", "\n"]: return i return None def truncate_repetitions(text: str, min_len: int = 30) -> str: """ Attempt to truncate repeating segments in the input string. This function looks for the longest repeating substring at the end of the input string and truncates it to appear only once. To be considered for removal, repetitions need to be continuous. Args: text (`str`): The input raw prediction to be truncated. min_len (int): The minimum length of the repeating segment. Returns: `str`: The input string with repeated segments truncated. 
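    Example (illustrative; the exact cut point depends on the repeated span that is detected):

        text = "A unique opening sentence. " + "This exact sentence repeats again and again. " * 8
        truncate_repetitions(text)  # keeps the opening plus (roughly) one copy of the repeated sentence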
""" text_lower = text.lower() text_length = len(text_lower) if text_length < 2 * min_len: return text # try to find a length at which the tail is repeating max_repetition_length = None for repetition_length in range(min_len, int(text_length / 2)): # check if there is a repetition at the end same = True for i in range(0, repetition_length): if text_lower[text_length - repetition_length - i - 1] != text_lower[text_length - i - 1]: same = False break if same: max_repetition_length = repetition_length if max_repetition_length is None: return text lcs = text_lower[-max_repetition_length:] # remove all but the last repetition substituted_text = text substituted_text_lower = text_lower while substituted_text_lower.endswith(lcs): substituted_text = substituted_text[:-max_repetition_length] substituted_text_lower = substituted_text_lower[:-max_repetition_length] # this is the tail with the repetitions repeating_tail = text_lower[len(substituted_text_lower) :] # add until next punctuation and make sure last sentence is not repeating substituted_text_lower_out = substituted_text_lower while True: sentence_end = find_next_punctuation(text_lower, len(substituted_text_lower_out)) sentence_start = find_next_punctuation(text_lower[::-1], len(substituted_text_lower_out)) if sentence_end and sentence_start: sentence = text_lower[sentence_start:sentence_end] substituted_text_lower_out = text_lower[: sentence_end + 1] if sentence in repeating_tail: break else: break text_out = text[: len(substituted_text_lower_out)] return text_out def remove_numbers(lines): def _clean(s): return re.sub(r"(?:[\d_]|\*\*)", "", s).strip() if isinstance(lines, str): return _clean(lines) out = [] for l in lines: out.append(_clean(l)) return out def get_slices(lines, clean_lines): """ Get slices of text based on specific criteria within the lines. This function identifies and returns slices of text from the input lines based on certain conditions. These conditions were chosen by the Nougat authors: - The slice is less than 200 characters long. - The slice is more than 3 characters long. - The slice does not start with "[MISSING_PAGE". - The slice is either the same as the next slice or the ratio of the two in terms of Levensthein distance is greater than 0.9. Args: lines (`List[str]`): The list of lines containing the text. clean_lines (`List[str]`): A cleaned version of the text (without numbers). Returns: `List[tuple]`: A list of tuples representing the start and end indices of text slices. """ indices = np.zeros(len(lines)) for i in range(len(lines) - 1): j = i + 1 while not clean_lines[j] and j < len(lines) - 1: j += 1 if ( len(clean_lines[i]) < 200 and len(clean_lines[i]) > 3 and len(clean_lines[j]) < 200 and len(clean_lines[j]) > 3 and not clean_lines[i].startswith("[MISSING_PAGE") and (clean_lines[i] == clean_lines[j] or ratio(clean_lines[i], clean_lines[j]) > 0.9) ): indices[i:j] = 1 ids = np.where(indices)[0] slices = [] if len(ids) == 0: return slices j0 = 0 for j, x in enumerate(np.diff(ids) > 3): if x: slices.append((ids[j0], ids[j] + 2)) j0 = j + 1 slices.append((ids[j0], ids[-1] + 2)) return [sli for sli in slices if sli[1] - sli[0] > 15] def remove_slice_from_lines(lines, clean_text, slice) -> str: """ Remove a slice of text from the lines based on specific criteria. This function identifies a slice of text within the lines and removes it based on certain conditions. Args: lines (list of str): The list of lines containing the text. clean_text (list of str): A cleaned version of the text (without numbers). 
slice (tuple): A tuple representing the start and end indices of the slice to be removed. Returns: str: The removed slice of text as a single string. """ base = clean_text[slice[0]] section = list(slice) check_start_flag = False # backwards pass, at most 5 lines for line_idx in range(max(0, slice[0] - 1), max(0, slice[0] - 5), -1): if not lines[line_idx]: continue if lines[line_idx] == "## References": section[0] = line_idx break elif ratio(base, remove_numbers(lines[line_idx])) < 0.9: section[0] = line_idx + 1 potential_ref = remove_numbers(lines[max(0, line_idx - 1)].partition("* [")[-1]) if len(potential_ref) >= 0.75 * len(base) and ratio(base, potential_ref) < 0.9: section[0] = line_idx check_start_flag = True break # forward pass, at most 5 lines for line_idx in range(min(len(lines), slice[1]), min(len(lines), slice[1] + 5)): if ratio(base, remove_numbers(lines[line_idx])) < 0.9: section[1] = line_idx break if len(lines) <= section[1]: section[1] = len(lines) - 1 to_delete = "\n".join(lines[section[0] : section[1] + 1]) # cut off next page content itera, iterb = enumerate(lines[section[1] - 1]), enumerate(lines[section[1]]) while True: try: (ia, a) = next(itera) while a.isnumeric(): (ia, a) = next(itera) (ib, b) = next(iterb) while b.isnumeric(): (ib, b) = next(iterb) if a != b: break except StopIteration: break if check_start_flag and "* [" in to_delete: to_delete = "* [" + to_delete.partition("* [")[-1] try: delta = len(lines[section[1]]) - ib - 1 if delta > 0: to_delete = to_delete[:-delta] except UnboundLocalError: pass return to_delete.strip() @add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class NougatTokenizerFast(PreTrainedTokenizerFast): """ Fast tokenizer for Nougat (backed by HuggingFace tokenizers library). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class mainly adds Nougat-specific methods for postprocessing the generated text. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`str`, *optional*, defaults to `False`): Wether to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. 
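    Example (illustrative; the checkpoint name is an assumption, `raw_generation` stands for decoded
    model output, and post-processing relies on the optional `nltk` and `Levenshtein` backends):

        tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")
        cleaned = tokenizer.post_process_single(raw_generation, fix_markdown=True)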
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = None def __init__( self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", **kwargs, ): super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, ) self.vocab_file = vocab_file def remove_hallucinated_references(self, text: str) -> str: """ Remove hallucinated or missing references from the text. This function identifies and removes references that are marked as missing or hallucinated from the input text. Args: text (`str`): The input text containing references. Returns: `str`: The text with hallucinated references removed. """ lines = text.split("\n") if len(lines) == 0: return "" clean_lines = remove_numbers(lines) slices = get_slices(lines, clean_lines) to_delete = [] for slice in slices: to_delete.append(remove_slice_from_lines(lines, clean_lines, slice)) for to_delete in reversed(to_delete): text = text.replace(to_delete, "\n\n[MISSING_PAGE_POST]\n\n") text = re.sub( r"## References\n+\[MISSING_PAGE_POST(:\d+)?\]", "\n\n[MISSING_PAGE_POST\\1]", text, ) return text def correct_tables(self, generation: str) -> str: """ Takes a generated string and fixes tables/tabulars to make them match the markdown format needed. Args: generation (str): The generated text to be postprocessed. Returns: str: The postprocessed text. Example: ```python correct_tables("\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}") "\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}" ``` """ # remove obvious wrong tables for l in generation.split("\n"): if l.count("\\begin{tabular}") > 15 or l.count("\\multicolumn") > 60 or l.count("&") > 400: generation = generation.replace(l, "") # whitespace corrections generation = generation.replace("\\begin{table} \\begin{tabular}", "\\begin{table}\n\\begin{tabular}") generation = generation.replace("\\end{tabular} \\end{table}", "\\end{tabular}\n\\end{table}") generation = generation.replace("\\end{table} Tab", "\\end{table}\nTab") generation = re.sub(r"(^.+)\\begin{tab", r"\1\n\\begin{tab", generation, flags=re.M) # Remove left-aligned empty LaTeX tabular blocks. generation = generation.replace(r"\begin{tabular}{l l} & \\ \end{tabular}", "") # Remove tabulars with just 2 newline characters. generation = generation.replace("\\begin{tabular}{}\n\n\\end{tabular}", "") return generation def post_process_single(self, generation: str, fix_markdown: bool = True) -> str: """ Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article authors. These expressions are commented for clarity and tested end-to-end in most cases. Args: generation (str): The generated text to be postprocessed. fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True. Returns: str: The postprocessed text. """ generation = re.sub( r"(?:\n|^)#+ \d*\W? 
?(.{100,})", r"\n\1", generation ) # too long section titles probably are none generation = generation.strip() # Remove LaTeX left margin tag generation = generation.replace("\n* [leftmargin=*]\n", "\n") # Remove lines with markdown headings starting with #, with numerals, # and possibly roman numerals with trailing spaces and newlines generation = re.sub(r"^#+ (?:[\d+\.]+|[ixv\.]+)?\s*(?:$|\n\s*)", "", generation, flags=re.M) # most likely hallucinated titles lines = generation.split("\n") if lines[-1].startswith("#") and lines[-1].lstrip("#").startswith(" ") and len(lines) > 1: logger.info("Likely hallucinated title at the end of the page: " + lines[-1]) generation = "\n".join(lines[:-1]) # obvious repetition detection generation = truncate_repetitions(generation) # Reference corrections generation = self.remove_hallucinated_references(generation) # Remove lines starting with asterisks and numbers like "*[1]" and followed by capital letters and periods (ie too long references) generation = re.sub(r"^\* \[\d+\](\s?[A-W]\.+\s?){10,}.*$", "", generation, flags=re.M) # Remove empty brackets after a reference number in brackets. *[12][]ABC will become *[12]ABC generation = re.sub(r"^(\* \[\d+\])\[\](.*)$", r"\1\2", generation, flags=re.M) # Remove single characters before or after 2 new lines generation = re.sub(r"(^\w\n\n|\n\n\w$)", "", generation) # pmc math artifact correction generation = re.sub( r"([\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\s.,:()])", r"\1\(\2_{\3}\)\4", generation, ) generation = re.sub(r"([\s.,\d])_([a-zA-Z0-9])_([\s.,\d;])", r"\1\(\2\)\3", generation) # footnote mistakes generation = re.sub( r"(\nFootnote .*?:) (?:footnotetext|thanks):\W*(.*(?:\n\n|$))", r"\1 \2", generation, ) # TODO Come up with footnote formatting inside a table generation = re.sub(r"\[FOOTNOTE:.+?\](.*?)\[ENDFOOTNOTE\]", "", generation) # itemize post processing generation = normalize_list_like_lines(generation) if generation.endswith((".", "}")): generation += "\n\n" if re.match(r"[A-Z0-9,;:]$", generation): # add space in case it there is a comma or word ending generation += " " elif generation.startswith(("#", "**", "\\begin")): generation = "\n\n" + generation elif generation.split("\n")[-1].startswith(("#", "Figure", "Table")): generation = generation + "\n\n" else: try: last_word = generation.split(" ")[-1] if last_word in nltk.corpus.words.words(): generation += " " except LookupError: # add space just in case. Will split words but better than concatenating them generation += " " # table corrections generation = self.correct_tables(generation) # Remove optional, empty square brackets after begin{array} generation = generation.replace("\\begin{array}[]{", "\\begin{array}{") # Remove empty or malformed LaTeX tabular blocks with 2 or more columns specified, with spaces and ampersands. generation = re.sub( r"\\begin{tabular}{([clr ]){2,}}\s*[& ]*\s*(\\\\)? \\end{tabular}", "", generation, ) # Remove lines containing "S.A.B." one or more times. Was included in Nougat's code. generation = re.sub(r"(\*\*S\. A\. B\.\*\*\n+){2,}", "", generation) # Remove markdown-style headers that are incomplete or empty on multiple lines. generation = re.sub(r"^#+( [\[\d\w])?$", "", generation, flags=re.M) # Remove lines with just one period. generation = re.sub(r"^\.\s*$", "", generation, flags=re.M) # Replace instances of three or more newlines with just two newlines. 
generation = re.sub(r"\n{3,}", "\n\n", generation) if fix_markdown: return markdown_compatible(generation) else: return generation def post_process_generation( self, generation: Union[str, List[str]], fix_markdown: bool = True, num_workers: int = None, ) -> Union[str, List[str]]: """ Postprocess a generated text or a list of generated texts. This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting. Postprocessing is quite slow so it is recommended to use multiprocessing to speed up the process. Args: generation (Union[str, List[str]]): The generated text or a list of generated texts. fix_markdown (`bool`, *optional*, defaults to `True`): Whether to perform Markdown formatting fixes. num_workers (`int`, *optional*): Optional number of workers to pass to leverage multiprocessing (postprocessing several texts in parallel). Returns: Union[str, List[str]]: The postprocessed text or list of postprocessed texts. """ requires_backends(self, ["nltk", "levenshtein"]) if isinstance(generation, list): if num_workers is not None and isinstance(num_workers, int): with Pool(num_workers) as p: return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation) else: return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation] else: return self.post_process_single(generation, fix_markdown=fix_markdown) __all__ = ["NougatTokenizerFast"]
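# A minimal usage sketch of the post-processing pipeline above (repetition truncation,
# hallucinated-reference removal, table fixes, optional markdown compatibility). This
# block is illustrative and not part of the original module: the checkpoint name is an
# assumption, and the `nltk` and `python-Levenshtein` backends must be installed for
# `post_process_generation` to run.
if __name__ == "__main__":
    tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")
    raw_generation = "# Introduction\n\nSome decoded model output with artifacts..."
    cleaned = tokenizer.post_process_generation(raw_generation, fix_markdown=True)
    print(cleaned)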
transformers/src/transformers/models/nougat/tokenization_nougat_fast.py/0
{ "file_path": "transformers/src/transformers/models/nougat/tokenization_nougat_fast.py", "repo_id": "transformers", "token_count": 10422 }
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OLMoE model configuration"""

from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation


class OlmoeConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`OlmoeModel`]. It is used to instantiate an OLMoE
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the
    [allenai/OLMoE-1B-7B-0924](https://huggingface.co/allenai/OLMoE-1B-7B-0924).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50304):
            Vocabulary size of the OLMoE model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`OlmoeModel`].
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 16):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details check out
            [this paper](https://arxiv.org/pdf/2305.13245.pdf). If not specified, it will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 50279):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        clip_qkv (`float`, *optional*):
            If not `None`, elements of query, key and value attention states are clipped so that their absolute value
            does not exceed this value.
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            Number of selected experts.
        num_experts (`int`, *optional*, defaults to 64):
            Number of routed experts.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also allow the model
            to output the auxiliary loss, including the load balancing loss and the router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
            The aux loss factor for the total loss.
        norm_topk_prob (`bool`, *optional*, defaults to `False`):
            Whether to normalize the topk probabilities.
```python >>> from transformers import OlmoeModel, OlmoeConfig >>> # Initializing a OLMoE 7B A1B style configuration >>> configuration = OlmoeConfig() >>> # Initializing a model from the OLMoE 7B A1B style configuration >>> model = OlmoeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "olmoe" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=50304, hidden_size=2048, intermediate_size=2048, num_hidden_layers=16, num_attention_heads=16, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, clip_qkv=None, num_experts_per_tok=8, num_experts=64, output_router_logits=False, router_aux_loss_coef=0.01, norm_topk_prob=False, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.clip_qkv = clip_qkv self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.norm_topk_prob = norm_topk_prob # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'. if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["OlmoeConfig"]
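# Illustrative sketch, not part of the original module: `rope_scaling` also accepts the
# legacy `"type"` key, which `__init__` above copies into `"rope_type"` before calling
# `rope_config_validation`. The scaling strategy and factor below are arbitrary example
# values chosen for demonstration.
if __name__ == "__main__":
    config = OlmoeConfig(rope_scaling={"type": "linear", "factor": 2.0})
    # The dict now carries the canonical "rope_type" entry alongside the legacy "type" key.
    print(config.rope_scaling)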
transformers/src/transformers/models/olmoe/configuration_olmoe.py/0
{ "file_path": "transformers/src/transformers/models/olmoe/configuration_olmoe.py", "repo_id": "transformers", "token_count": 3534 }
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OpenAI GPT checkpoint.""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path): # Construct model if openai_config_file == "": config = OpenAIGPTConfig() else: config = OpenAIGPTConfig.from_json_file(openai_config_file) model = OpenAIGPTModel(config) # Load weights from numpy load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME print(f"Save PyTorch model to {pytorch_weights_dump_path}") torch.save(model.state_dict(), pytorch_weights_dump_path) print(f"Save configuration file to {pytorch_config_dump_path}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--openai_checkpoint_folder_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--openai_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) args = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
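# Example invocation (illustrative only; the paths below are placeholders, not real
# checkpoints). The script expects the TensorFlow/numpy checkpoint folder from the
# original OpenAI GPT release and writes `pytorch_model.bin` plus `config.json` into the
# output folder:
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai_gpt_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --openai_config_file /path/to/openai_gpt_config.json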
transformers/src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 987 }
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for OWLv2
"""

import warnings
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available


if TYPE_CHECKING:
    from .modeling_owlv2 import Owlv2ImageGuidedObjectDetectionOutput, Owlv2ObjectDetectionOutput


class Owlv2Processor(ProcessorMixin):
    r"""
    Constructs an Owlv2 processor which wraps [`Owlv2ImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into
    a single processor that inherits both the image processor and tokenizer functionalities. See the
    [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information.

    Args:
        image_processor ([`Owlv2ImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Owlv2ImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor, tokenizer, **kwargs):
        super().__init__(image_processor, tokenizer)

    # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.__call__ with OwlViT->Owlv2
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """
        Main method to prepare one or several text(s) and image(s) for the model. This method forwards the `text` and
        `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
            `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
            `List[torch.Tensor]`):
                The query image to be prepared, one query image is expected per target image to be queried. Each image
                can be a PIL image, NumPy array or PyTorch tensor.
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)): encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)] elif isinstance(text, List) and isinstance(text[0], List): encodings = [] # Maximum number of queries across batch max_num_queries = max([len(t) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(t) != max_num_queries: t = t + [" "] * (max_num_queries - len(t)) encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs) encodings.append(encoding) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0) elif return_tensors == "pt" and is_torch_available(): import torch input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0) attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0) attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0) else: raise ValueError("Target return tensor type could not be returned") encoding = BatchEncoding() encoding["input_ids"] = input_ids encoding["attention_mask"] = attention_mask if query_images is not None: encoding = BatchEncoding() query_pixel_values = self.image_processor( query_images, return_tensors=return_tensors, **kwargs ).pixel_values encoding["query_pixel_values"] = query_pixel_values if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif query_images is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return 
encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_object_detection with OwlViT->Owlv2 def post_process_object_detection(self, *args, **kwargs): """ This method forwards all its arguments to [`Owlv2ImageProcessor.post_process_object_detection`]. Please refer to the docstring of this method for more information. """ warnings.warn( "`post_process_object_detection` method is deprecated for OwlVitProcessor and will be removed in v5. " "Use `post_process_grounded_object_detection` instead.", FutureWarning, ) return self.image_processor.post_process_object_detection(*args, **kwargs) # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_grounded_object_detection with OwlViT->Owlv2 def post_process_grounded_object_detection( self, outputs: "Owlv2ObjectDetectionOutput", threshold: float = 0.1, target_sizes: Optional[Union[TensorType, List[Tuple]]] = None, text_labels: Optional[List[List[str]]] = None, ): """ Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`Owlv2ObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. text_labels (`List[List[str]]`, *optional*): List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be set to `None`. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "text_labels": The text labels for each predicted bounding box on the image. """ output = self.image_processor.post_process_object_detection( outputs=outputs, threshold=threshold, target_sizes=target_sizes ) if text_labels is not None and len(text_labels) != len(output): raise ValueError("Make sure that you pass in as many lists of text labels as images") # adding text labels to the output if text_labels is not None: for image_output, image_text_labels in zip(output, text_labels): object_text_labels = [image_text_labels[i] for i in image_output["labels"]] image_output["text_labels"] = object_text_labels else: for image_output in output: image_output["text_labels"] = None return output # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_image_guided_detection with OwlViT->Owlv2 def post_process_image_guided_detection( self, outputs: "Owlv2ImageGuidedObjectDetectionOutput", threshold: float = 0.0, nms_threshold: float = 0.3, target_sizes: Optional[Union[TensorType, List[Tuple]]] = None, ): """ Converts the output of [`Owlv2ForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`Owlv2ImageGuidedObjectDetectionOutput`]): Raw outputs of the model. 
threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "labels": Set to `None`. """ return self.image_processor.post_process_image_guided_detection( outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes ) # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.batch_decode def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.decode def decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) __all__ = ["Owlv2Processor"]
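# A minimal end-to-end sketch of how this processor is typically used together with
# `Owlv2ForObjectDetection`. This block is illustrative and not part of the original
# module; the checkpoint name, image URL, and threshold are assumptions made for
# demonstration purposes.
if __name__ == "__main__":
    import requests
    import torch
    from PIL import Image

    from transformers import Owlv2ForObjectDetection, Owlv2Processor

    processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
    model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    text_labels = [["a photo of a cat", "a photo of a dog"]]

    inputs = processor(text=text_labels, images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Rescale normalized boxes to the original image size (height, width) and attach text labels.
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_grounded_object_detection(
        outputs=outputs, threshold=0.2, target_sizes=target_sizes, text_labels=text_labels
    )
    print(results[0]["scores"], results[0]["boxes"], results[0]["text_labels"])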
transformers/src/transformers/models/owlv2/processing_owlv2.py/0
{ "file_path": "transformers/src/transformers/models/owlv2/processing_owlv2.py", "repo_id": "transformers", "token_count": 5896 }
# coding=utf-8
# Copyright 2023 IBM and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PatchTSMixer model."""

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput

from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_patchtsmixer import PatchTSMixerConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "PatchTSMixerConfig"


PATCHTSMIXER_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`PatchTSMixerConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
        mask_input (`bool`, *optional*, defaults to `False`):
            If `True`, masking will be enabled; `False` otherwise.
"""

PATCHTSMIXER_INPUTS_DOCSTRING = r"""
    Args:
        past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
            Context values of the time series. For a pretraining task, this denotes the input time series to predict
            the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly,
            for classification or regression tasks, it denotes the appropriate context values of the time series.

            For univariate time series, the `num_input_channels` dimension should be 1. For multivariate time series,
            it is greater than 1.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers.

        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class PatchTSMixerGatedAttention(nn.Module):
    """
    Module that applies gated attention to input data.

    Args:
        in_size (`int`): The input size.
        out_size (`int`): The output size.
""" def __init__(self, in_size: int, out_size: int): super().__init__() self.attn_layer = nn.Linear(in_size, out_size) self.attn_softmax = nn.Softmax(dim=-1) def forward(self, inputs): attn_weight = self.attn_softmax(self.attn_layer(inputs)) inputs = inputs * attn_weight return inputs # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTBatchNorm with PatchTST->PatchTSMixer class PatchTSMixerBatchNorm(nn.Module): """ Compute batch normalization over the sequence length (time) dimension. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps) def forward(self, inputs: torch.Tensor): """ Parameters: inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`): input for Batch norm calculation Returns: `torch.Tensor` of shape `(batch_size, sequence_length, d_model)` """ output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length) output = self.batchnorm(output) return output.transpose(1, 2) class PatchTSMixerPositionalEncoding(nn.Module): """ Class for positional encoding """ def __init__(self, config: PatchTSMixerConfig): super().__init__() # positional encoding: [num_patches x d_model] if config.use_positional_encoding: self.position_enc = self._init_pe(config) else: self.position_enc = nn.Parameter(torch.zeros(config.num_patches, config.d_model)) @staticmethod def _init_pe(config: PatchTSMixerConfig) -> nn.Parameter: # Positional encoding if config.positional_encoding_type == "random": position_enc = nn.Parameter(torch.randn(config.num_patches, config.d_model), requires_grad=True) elif config.positional_encoding_type == "sincos": position_enc = torch.zeros(config.num_patches, config.d_model) position = torch.arange(0, config.num_patches).unsqueeze(1) div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model)) position_enc[:, 0::2] = torch.sin(position * div_term) position_enc[:, 1::2] = torch.cos(position * div_term) position_enc = position_enc - position_enc.mean() position_enc = position_enc / (position_enc.std() * 10) position_enc = nn.Parameter(position_enc, requires_grad=False) else: raise ValueError( f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'." ) return position_enc def forward(self, patch_input: torch.Tensor): # hidden_state: [bs x num_channels x num_patches x d_model] hidden_state = patch_input + self.position_enc return hidden_state class PatchTSMixerNormLayer(nn.Module): """Normalization block Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm_mlp = config.norm_mlp if "batch" in config.norm_mlp.lower(): self.norm = PatchTSMixerBatchNorm(config) else: self.norm = nn.LayerNorm(config.d_model, eps=config.norm_eps) def forward(self, inputs: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`): Input to the normalization layer. 
Returns: `torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))` """ if "batch" in self.norm_mlp.lower(): # reshape the data inputs_reshaped = torch.reshape( inputs, ( inputs.shape[0] * inputs.shape[1], inputs.shape[2], inputs.shape[3], ), ) # inputs_reshaped: [batch_size*num_channels, num_patches, d_model] # inputs_reshaped: [batch_size*num_channels, num_patches, d_model] inputs_reshaped = self.norm(inputs_reshaped) # put back data to the original shape inputs = torch.reshape(inputs_reshaped, inputs.shape) else: inputs = self.norm(inputs) return inputs class PatchTSMixerMLP(nn.Module): def __init__(self, in_features, out_features, config): super().__init__() num_hidden = in_features * config.expansion_factor self.fc1 = nn.Linear(in_features, num_hidden) self.dropout1 = nn.Dropout(config.dropout) self.fc2 = nn.Linear(num_hidden, out_features) self.dropout2 = nn.Dropout(config.dropout) def forward(self, inputs: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`): Input to the MLP layer. Returns: `torch.Tensor` of the same shape as `inputs` """ inputs = self.dropout1(nn.functional.gelu(self.fc1(inputs))) inputs = self.fc2(inputs) inputs = self.dropout2(inputs) return inputs class PatchTSMixerChannelFeatureMixerBlock(nn.Module): """This module mixes the features in the channel dimension. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.num_input_channels, out_features=config.num_input_channels, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention( in_size=config.num_input_channels, out_size=config.num_input_channels ) def forward(self, inputs: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`): input to the MLP layer Returns: `torch.Tensor` of the same shape as `inputs` """ residual = inputs inputs = self.norm(inputs) inputs = inputs.permute(0, 3, 2, 1) if self.gated_attn: inputs = self.gating_block(inputs) inputs = self.mlp(inputs) inputs = inputs.permute(0, 3, 2, 1) out = inputs + residual return out # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PatchTSMixer class PatchTSMixerAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[PatchTSMixerConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class PatchMixerBlock(nn.Module): """This module mixes the patch dimension. Args: config (`PatchTSMixerConfig`): Configuration. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.self_attn = config.self_attn self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.num_patches, out_features=config.num_patches, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_patches, out_size=config.num_patches) if config.self_attn: self.self_attn_layer = PatchTSMixerAttention( embed_dim=config.d_model, num_heads=config.self_attn_heads, dropout=config.dropout, ) self.norm_attn = PatchTSMixerNormLayer(config) def forward(self, hidden_state): """ Args: hidden_state (`torch.Tensor`): Input tensor. Returns: `torch.Tensor`: Transformed tensor. """ residual = hidden_state hidden_state = self.norm(hidden_state) if self.self_attn: batch_size, n_vars, num_patches, d_model = hidden_state.shape hidden_state_reshaped = hidden_state.reshape(batch_size * n_vars, num_patches, d_model) x_attn, _, _ = self.self_attn_layer(hidden_state_reshaped, output_attentions=False) x_attn = x_attn.reshape(batch_size, n_vars, num_patches, d_model) # Transpose so that num_patches is the last dimension hidden_state = hidden_state.transpose(2, 3) hidden_state = self.mlp(hidden_state) if self.gated_attn: hidden_state = self.gating_block(hidden_state) # Transpose back hidden_state = hidden_state.transpose(2, 3) if self.self_attn: hidden_state = self.norm_attn(hidden_state + x_attn) out = hidden_state + residual return out class FeatureMixerBlock(nn.Module): """This module mixes the hidden feature dimension. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.norm = PatchTSMixerNormLayer(config) self.gated_attn = config.gated_attn self.mlp = PatchTSMixerMLP( in_features=config.d_model, out_features=config.d_model, config=config, ) if config.gated_attn: self.gating_block = PatchTSMixerGatedAttention(in_size=config.d_model, out_size=config.d_model) def forward(self, hidden: torch.Tensor): """ Args: hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`): Input tensor to the layer. Returns: `torch.Tensor`: Transformed tensor. """ residual = hidden hidden = self.norm(hidden) hidden = self.mlp(hidden) if self.gated_attn: hidden = self.gating_block(hidden) out = hidden + residual return out class PatchTSMixerLayer(nn.Module): """ The `PatchTSMixer` layer that does all three kinds of mixing. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.patch_mixer = PatchMixerBlock(config=config) self.feature_mixer = FeatureMixerBlock(config=config) self.mode = config.mode if config.mode == "mix_channel": self.channel_feature_mixer = PatchTSMixerChannelFeatureMixerBlock(config=config) def forward(self, hidden: torch.Tensor): """ Args: hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`): Input tensor to the layer. Returns: `torch.Tensor`: Transformed tensor. """ if self.mode == "mix_channel": hidden = self.channel_feature_mixer(hidden) hidden = self.patch_mixer(hidden) hidden = self.feature_mixer(hidden) # hidden: (batch_size x num_patches x d_model) return hidden class PatchTSMixerBlock(nn.Module): """The main computing framework of the `PatchTSMixer` model. Args: config (`PatchTSMixerConfig`): Configuration. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__() num_layers = config.num_layers self.mixers = nn.ModuleList([PatchTSMixerLayer(config=config) for _ in range(num_layers)]) def forward(self, hidden_state, output_hidden_states: bool = False): """ Args: hidden_state (`torch.Tensor`): The input tensor. output_hidden_states (`bool`, *optional*, defaults to False.): Whether to output the hidden states as well. Returns: `torch.Tensor`: The embedding. `list`: List of all hidden states if `output_hidden_states` is set to `True`. """ all_hidden_states = [] embedding = hidden_state for mod in self.mixers: embedding = mod(embedding) if output_hidden_states: all_hidden_states.append(embedding) if output_hidden_states: return embedding, all_hidden_states else: return embedding, None class PatchTSMixerForPredictionHead(nn.Module): """Prediction Head for Forecasting Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig, distribution_output=None): super().__init__() self.prediction_channel_indices = config.prediction_channel_indices if self.prediction_channel_indices is not None: self.prediction_channel_indices.sort() self.dropout_layer = nn.Dropout(config.head_dropout) if distribution_output is None: self.base_forecast_block = nn.Linear((config.num_patches * config.d_model), config.prediction_length) else: self.base_forecast_block = distribution_output.get_parameter_projection( config.num_patches * config.d_model ) self.flatten = nn.Flatten(start_dim=-2) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size, num_patch, d_model)` in `flatten` mode or `(batch_size, n_vars, num_patch, d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size, prediction_length, nvars)`. """ hidden_features = self.flatten(hidden_features) # [batch_size x n_vars x num_patch * d_model] hidden_features = self.dropout_layer(hidden_features) # [batch_size x n_vars x num_patch * d_model] forecast = self.base_forecast_block(hidden_features) # [batch_size x n_vars x prediction_length] if isinstance(forecast, tuple): forecast = tuple(z.transpose(-1, -2) for z in forecast) else: forecast = forecast.transpose(-1, -2) # [batch_size x prediction_length x n_vars] if self.prediction_channel_indices is not None: if isinstance(forecast, tuple): forecast = tuple(z[..., self.prediction_channel_indices] for z in forecast) else: forecast = forecast[..., self.prediction_channel_indices] # [batch_size x prediction_length x n_vars] return forecast class PatchTSMixerLinearHead(nn.Module): """Linear head for Classification and Regression. Args: config (`PatchTSMixerConfig`): Configuration. 
""" def __init__(self, config: PatchTSMixerConfig, distribution_output=None): super().__init__() self.head_aggregation = config.head_aggregation self.output_range = config.output_range if config.head_aggregation is None: mul_factor = config.num_patches else: mul_factor = 1 self.distribution_output = distribution_output if distribution_output is None: self.projection = nn.Linear( config.d_model * config.num_input_channels * mul_factor, config.num_targets, ) else: self.projection = distribution_output.get_parameter_projection( config.d_model * config.num_input_channels * mul_factor ) if config.head_aggregation is None: self.flatten = nn.Flatten(start_dim=-3) else: self.flatten = nn.Flatten(start_dim=-2) self.dropout = nn.Dropout(config.head_dropout) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size x num_targets)`. """ # batch_size x d_model x num_patch or batch_size x n_vars x d_model x num_patch hidden_features = hidden_features.transpose(-1, -2) if self.head_aggregation == "use_last": # batch_size x d_model (flatten) or # batch_size x n_vars x d_model (common_channel) hidden_features = hidden_features[..., -1] elif self.head_aggregation == "max_pool": # batch_size x n_vars x d_model or batch_size x d_model hidden_features = hidden_features.max(dim=-1).values elif self.head_aggregation == "avg_pool": # batch_size x n_vars x d_model or batch_size x d_model hidden_features = hidden_features.mean(dim=-1) if self.flatten: hidden_features = self.flatten(hidden_features) hidden_features = self.dropout(hidden_features) hidden_features = self.projection(hidden_features) # batch_size x num_targets if (self.distribution_output is None) and (self.output_range is not None): hidden_features = ( torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0] ) return hidden_features class PatchTSMixerPreTrainedModel(PreTrainedModel): # Weight initialization config_class = PatchTSMixerConfig base_model_prefix = "model" main_input_name = "past_values" supports_gradient_checkpointing = False def _init_weights(self, module): """Initialize weights""" if isinstance(module, PatchTSMixerPositionalEncoding): # initialize positional encoding if self.config.positional_encoding_type == "random": nn.init.normal_(module.position_enc, mean=0.0, std=0.1) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PatchTSMixerBatchNorm): module.batchnorm.bias.data.zero_() module.batchnorm.weight.data.fill_(1.0) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() class PatchTSMixerPretrainHead(nn.Module): """Pretraining head. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dropout_layer = nn.Dropout(config.head_dropout) self.base_pt_block = nn.Linear(config.d_model, config.patch_length) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. 
Returns: `torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`. """ hidden_features = self.dropout_layer(hidden_features) forecast = self.base_pt_block(hidden_features) # [batch_size x n_vars x num_patch x patch_length] return forecast # Copied from transformers.models.patchtst.modeling_patchtst.random_masking def random_masking( inputs: torch.Tensor, mask_ratio: float, unmasked_channel_indices: list = None, channel_consistent_masking: bool = False, mask_value: int = 0, ): """random_masking: Mask the input considering the control variables. Args: inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`): The input tensor to mask. mask_ratio (`float`): Masking ratio applied to mask the input data during random pretraining. It is the number between 0 and 1. unmasked_channel_indices (list, *optional*): Indices of channels that will not be masked. channel_consistent_masking (bool, *optional*, defaults to `False`): When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary across channels. mask_value (int, *optional*, defaults to 0): Define the value of masked patches for pretraining. Returns: `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x n] """ if mask_ratio < 0 or mask_ratio >= 1: raise ValueError(f"Mask ratio {mask_ratio} has to be between 0 and 1.") batch_size, num_channels, sequence_length, num_features = inputs.shape device = inputs.device len_keep = int(sequence_length * (1 - mask_ratio)) if channel_consistent_masking: noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time else: # noise in [0, 1], bs x num_channels x L noise = torch.rand(batch_size, num_channels, sequence_length, device=device) # mask: [bs x num_channels x num_patch] mask = torch.ones(batch_size, num_channels, sequence_length, device=device) mask[:, :, :len_keep] = 0 # sort noise for each sample ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L] mask = torch.gather(mask, dim=-1, index=ids_restore) mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length] if unmasked_channel_indices is not None: mask[:, unmasked_channel_indices, :, :] = 0 inputs_mask = inputs.masked_fill(mask.bool(), mask_value) return inputs_mask, mask[..., 0] # Copied from transformers.models.patchtst.modeling_patchtst.forecast_masking def forecast_masking( inputs: torch.Tensor, num_forecast_mask_patches: Union[list, int], unmasked_channel_indices: list = None, mask_value: int = 0, ): """Forecast masking that masks the last K patches where K is from the num_forecast_mask_patches. If num_forecast_mask_patches is a list, samples in the batch will be randomly masked by numbers defined in the list. Parameters: inputs (`torch.Tensor`): Input of shape `(bs, num_channels, num_patch, patch_length)` num_forecast_mask_patches (`list`): Number of patches to be masked at the end of each batch sample. e.g. 4 or [3, 5]. unmasked_channel_indices (`list`, *optional*): Indices of channels that are not masked. mask_value (`int`, *optional*, defaults to 0): Values in the masked patches will be filled by `mask_value`. 
Returns: `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs, num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)` """ if isinstance(num_forecast_mask_patches, int): num_forecast_mask_patches = [num_forecast_mask_patches] forecast_mask_ratios = [1 for _ in num_forecast_mask_patches] batch_size, num_channels, sequence_length, num_features = inputs.shape mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device) t_list = [] total_length = 0 total_ratio = sum(forecast_mask_ratios) for patch_length, ratio in zip(num_forecast_mask_patches, forecast_mask_ratios): if patch_length <= 0 or patch_length >= sequence_length: raise ValueError( f"num_forecast_mask_patches {patch_length} should be greater than 0 and less than total patches." ) temp_len = int(batch_size * ratio / total_ratio) t_list.append([patch_length, ratio, temp_len]) total_length += temp_len t_list = sorted(t_list, key=lambda x: x[2]) if total_length < batch_size: t_list[0][2] = t_list[0][2] + (batch_size - total_length) elif total_length > batch_size: t_list[-1][2] = t_list[-1][2] + (total_length - batch_size) batch1 = 0 for patch_len, _, temp_len in t_list: batch2 = batch1 + temp_len mask[batch1:batch2, :, -patch_len:] = 1 batch1 = batch2 perm = torch.randperm(mask.shape[0]) mask = mask[perm] mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len] if unmasked_channel_indices is not None: mask[:, unmasked_channel_indices, :, :] = 0 inputs_mask = inputs.masked_fill(mask.bool(), mask_value) return inputs_mask, mask[..., 0] # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTPatchify with PatchTST->PatchTSMixer class PatchTSMixerPatchify(nn.Module): """ A class to patchify the time series sequence into different patches Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.sequence_length = config.context_length self.patch_length = config.patch_length self.patch_stride = config.patch_stride if self.sequence_length <= self.patch_length: raise ValueError( f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" ) # get the number of patches self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1) self.sequence_start = self.sequence_length - new_sequence_length def forward(self, past_values: torch.Tensor): """ Parameters: past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): Input for patchification Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ sequence_length = past_values.shape[-2] if sequence_length != self.sequence_length: raise ValueError( f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." 
) # output: [bs x new_sequence_length x num_channels] output = past_values[:, self.sequence_start :, :] # output: [bs x num_patches x num_input_channels x patch_length] output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) # output: [bs x num_input_channels x num_patches x patch_length] output = output.transpose(-2, -3).contiguous() return output # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTMasking with PatchTST->PatchTSMixer class PatchTSMixerMasking(nn.Module): """ Class to perform random or forecast masking. Parameters: config (`PatchTSMixerConfig`): model config Returns: x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.random_mask_ratio = config.random_mask_ratio self.channel_consistent_masking = config.channel_consistent_masking self.mask_type = config.mask_type self.num_forecast_mask_patches = config.num_forecast_mask_patches self.unmasked_channel_indices = config.unmasked_channel_indices self.mask_value = config.mask_value if self.unmasked_channel_indices is not None: self.unmasked_channel_indices = sorted(self.unmasked_channel_indices) def forward(self, patch_input: torch.Tensor): """ Parameters: patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): Patch input Return: masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ if self.mask_type == "random": masked_input, mask = random_masking( inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value, ) elif self.mask_type == "forecast": masked_input, mask = forecast_masking( inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value, ) else: raise ValueError(f"Invalid mask type {self.mask_type}.") # mask: [bs x num_input_channels x num_patch] mask = mask.bool() return masked_input, mask # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTStdScaler with PatchTST->PatchTSMixer class PatchTSMixerStdScaler(nn.Module): """ Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by subtracting from the mean and dividing by the standard deviation. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. 
Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTMeanScaler with PatchTST->PatchTSMixer class PatchTSMixerMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale # Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTNOPScaler with PatchTST->PatchTSMixer class PatchTSMixerNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale @dataclass class PatchTSMixerEncoderOutput(ModelOutput): """ Base class for `PatchTSMixerEncoderOutput`, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`): Hidden-state at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class PatchTSMixerEncoder(PatchTSMixerPreTrainedModel): """ Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings. Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.use_return_dict = config.use_return_dict self.patcher = nn.Linear(config.patch_length, config.d_model) if config.use_positional_encoding: self.positional_encoder = PatchTSMixerPositionalEncoding(config=config) else: self.positional_encoder = None self.mlp_mixer_encoder = PatchTSMixerBlock(config=config) # Initialize weights and apply final processing if config.post_init: self.post_init() @replace_return_docstrings(output_type=PatchTSMixerEncoderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, PatchTSMixerEncoderOutput]: r""" Args: past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`): Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: `torch.FloatTensor` of shape `(batch_size, n_vars, num_patches, d_model)` """ return_dict = return_dict if return_dict is not None else self.use_return_dict # flatten [bs x num_patch x d_model]. 
common_channel/mix_channel: [bs x n_vars x num_patch x d_model] patches = self.patcher(past_values) # add positional encoder if self.positional_encoder is not None: patches = self.positional_encoder(patches) last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states) if not return_dict: return tuple( v for v in [ last_hidden_state, hidden_states, ] ) return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states) @dataclass class PatchTSMixerModelOutput(ModelOutput): """ Base class for model's outputs, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`): Hidden-state at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): Patched input data to the model. mask: (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`,*optional*): Bool Tensor indicating True in masked patches and False otherwise. loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*): Gives the mean of the context window per channel. Used for revin denorm outside the model, if revin enabled. scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`,*optional*): Gives the std dev of the context window per channel. Used for revin denorm outside the model, if revin enabled. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None patch_input: torch.FloatTensor = None mask: Optional[torch.FloatTensor] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None @add_start_docstrings( "The PatchTSMixer Model for time-series forecasting.", PATCHTSMIXER_START_DOCSTRING, ) class PatchTSMixerModel(PatchTSMixerPreTrainedModel): def __init__(self, config: PatchTSMixerConfig, mask_input: bool = False): super().__init__(config) self.use_return_dict = config.use_return_dict self.encoder = PatchTSMixerEncoder(config) self.patching = PatchTSMixerPatchify(config) if mask_input is True: self.masking = PatchTSMixerMasking(config) else: self.masking = None if config.scaling == "mean": self.scaler = PatchTSMixerMeanScaler(config) elif config.scaling == "std" or config.scaling is True: self.scaler = PatchTSMixerStdScaler(config) else: self.scaler = PatchTSMixerNOPScaler(config) # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> PatchTSMixerModelOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). 
Returns: """ return_dict = return_dict if return_dict is not None else self.use_return_dict mask = None if observed_mask is None: observed_mask = torch.ones_like(past_values) scaled_past_values, loc, scale = self.scaler(past_values, observed_mask) patched_x = self.patching(scaled_past_values) # [batch_size x num_input_channels x num_patch x patch_length enc_input = patched_x if self.masking is not None: enc_input, mask = self.masking(patched_x) # enc_input: [batch_size x num_input_channels x num_patch x patch_length] # mask: [batch_size x num_input_channels x num_patch] encoder_output = self.encoder( enc_input, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if isinstance(encoder_output, tuple): encoder_output = PatchTSMixerEncoderOutput(*encoder_output) if not return_dict: return tuple( v for v in [ encoder_output.last_hidden_state, encoder_output.hidden_states, patched_x, mask, loc, scale, ] ) return PatchTSMixerModelOutput( last_hidden_state=encoder_output.last_hidden_state, hidden_states=encoder_output.hidden_states, patch_input=patched_x, mask=mask, loc=loc, scale=scale, ) @dataclass class PatchTSMixerForPreTrainingOutput(ModelOutput): """ Output type of [`PatchTSMixerForPreTrainingOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, patch_length)`): Prediction output from the pretrain head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for mask pretraining. Args: config (`PatchTSMixerConfig`): Configuration. Returns: `None`. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config, mask_input=True) self.head = PatchTSMixerPretrainHead(config=config) self.masked_loss = config.masked_loss self.use_return_dict = config.use_return_dict # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForPreTrainingOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. 
Returns: """ return_dict = return_dict if return_dict is not None else self.use_return_dict if self.masked_loss is True: loss = torch.nn.MSELoss(reduction="none") else: loss = torch.nn.MSELoss(reduction="mean") # past_values: tensor [batch_size x context_length x num_input_channels] model_output = self.model( past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # x.last_hidden_state: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) x_hat = self.head(model_output.last_hidden_state) # tensor [batch_size x nvars x num_patch x patch_length] if return_loss is True: loss_val = loss(x_hat, model_output.patch_input) else: loss_val = None # calculate masked_loss if self.masked_loss is True and loss_val is not None: loss_val = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) if not return_dict: return tuple( v for v in [ loss_val, x_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForPreTrainingOutput( loss=loss_val, prediction_outputs=x_hat, # tensor [batch_size x nvars x num_patch x patch_length] last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, ) @dataclass class PatchTSMixerForPredictionOutput(ModelOutput): """ Output type of [`PatchTSMixerForPredictionOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_input_channels)`): Prediction output from the forecast head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. loc (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input mean scale (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input std dev """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None loc: torch.FloatTensor = None scale: torch.FloatTensor = None @dataclass class SamplePatchTSMixerPredictionOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length, number_channels)`): Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None @dataclass class SamplePatchTSMixerRegressionOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Args: sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, num_targets)` Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: """ Computes the negative log likelihood loss from input distribution with respect to target. 
""" return -input.log_prob(target) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: """ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. Args: input_tensor (`torch.FloatTensor`): Input tensor, of which the average must be computed. weights (`torch.FloatTensor`, *optional*): Weights tensor, of the same shape as `input_tensor`. dim (`int`, *optional*): The dim along which to average `input_tensor`. Returns: `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. """ if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) class PatchTSMixerForPrediction(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for forecasting application. Args: config (`PatchTSMixerConfig`): Configuration. Returns: `None`. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.loss = config.loss self.use_return_dict = config.use_return_dict self.prediction_channel_indices = config.prediction_channel_indices self.num_parallel_samples = config.num_parallel_samples if config.loss == "mse": self.distribution_output = None else: dim = config.prediction_length distribution_output_map = { "student_t": StudentTOutput, "normal": NormalOutput, "negative_binomial": NegativeBinomialOutput, } output_class = distribution_output_map.get(config.distribution_output, None) if output_class is not None: self.distribution_output = output_class(dim=dim) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") self.model = PatchTSMixerModel(config) self.head = PatchTSMixerForPredictionHead( config=config, distribution_output=self.distribution_output, ) # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerForPredictionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForPredictionOutput: r""" observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). future_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting,: `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*): Target values of the time series, that serve as labels for the model. The `future_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT required for a pretraining task. 
For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel Filtering for both prediction and target will be manually applied before the loss computation. return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. Returns: """ if self.loss == "mse": loss = nn.MSELoss(reduction="mean") elif self.loss == "nll": loss = nll else: raise ValueError("Invalid loss function: Allowed values: mse and nll") return_dict = return_dict if return_dict is not None else self.use_return_dict # past_values: tensor [batch_size x context_length x num_input_channels] model_output = self.model( past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # model_output: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) # tensor [batch_size x prediction_length x num_input_channels] y_hat = self.head(model_output.last_hidden_state) loss_val = None if self.prediction_channel_indices is not None: if self.distribution_output: distribution = self.distribution_output.distribution( y_hat, loc=model_output.loc[..., self.prediction_channel_indices], scale=model_output.scale[..., self.prediction_channel_indices], ) if future_values is not None and return_loss is True: loss_val = loss( distribution, future_values[..., self.prediction_channel_indices], ) # take average of the loss loss_val = weighted_average(loss_val) else: y_hat = ( y_hat * model_output.scale[..., self.prediction_channel_indices] + model_output.loc[..., self.prediction_channel_indices] ) if future_values is not None and return_loss is True: loss_val = loss(y_hat, future_values[..., self.prediction_channel_indices]) else: if self.distribution_output: distribution = self.distribution_output.distribution( y_hat, loc=model_output.loc, scale=model_output.scale ) if future_values is not None and return_loss is True: loss_val = loss(distribution, future_values) loss_val = weighted_average(loss_val) else: y_hat = y_hat * model_output.scale + model_output.loc if future_values is not None and return_loss is True: loss_val = loss(y_hat, future_values) if self.prediction_channel_indices is not None: loc = model_output.loc[..., self.prediction_channel_indices] scale = model_output.scale[..., self.prediction_channel_indices] else: loc = model_output.loc scale = model_output.scale if not return_dict: return tuple( v for v in [ loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, loc, scale, ] ) return PatchTSMixerForPredictionOutput( loss=loss_val, prediction_outputs=y_hat, # tensor [batch_size x prediction_length x num_input_channels] last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, loc=loc, scale=scale, ) def generate( self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor] = None, ) -> SamplePatchTSMixerPredictionOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the future. 
observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Return: [`SamplePatchTSMixerPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length, num_input_channels)`. """ # get number of samples num_parallel_samples = self.num_parallel_samples # get model output outputs = self( past_values=past_values, future_values=None, observed_mask=observed_mask, output_hidden_states=False, ) # get distribution distribution = self.distribution_output.distribution( outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale ) # get samples: list of [batch_size x prediction_length x num_channels] samples = [distribution.sample() for _ in range(num_parallel_samples)] # stack tensors samples = torch.stack(samples, dim=1) # [batch_size x num_samples x prediction_length x num_channels] return SamplePatchTSMixerPredictionOutput(sequences=samples) @dataclass class PatchTSMixerForTimeSeriesClassificationOutput(ModelOutput): """ Output type of [`PatchTSMixerForTimeSeriesClassificationOutput`]. Args: prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_labels)`): Prediction output from the classfication head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class PatchTSMixerForTimeSeriesClassification(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for classification application. Args: config (`PatchTSMixerConfig`): Configuration. Returns: `None`. """ def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config) self.head = PatchTSMixerLinearHead( config=config, ) self.use_return_dict = config.use_return_dict if config.scaling in ["std", "mean", True]: self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches) else: self.inject_scale = None # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings( output_type=PatchTSMixerForTimeSeriesClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, past_values: torch.Tensor, target_values: torch.Tensor = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForTimeSeriesClassificationOutput: r""" target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*): Target values of the time series, that serve as labels for the model. 
The `target_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT required for a pretraining task. For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel Filtering for both prediction and target will be manually applied before the loss computation. For a classification task, it has a shape of `(batch_size,)`. For a regression task, it has a shape of `(batch_size, num_targets)`. return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. Returns: """ loss = torch.nn.CrossEntropyLoss() return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model( past_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # x: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) if self.inject_scale is not None: model_output.last_hidden_state = self.inject_scale( model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale, ) # x: [batch_size x nvars x num_patch x d_model] y_hat = self.head(model_output.last_hidden_state) # tensor [batch_size x n_labels] if target_values is not None and return_loss is True: loss_val = loss(y_hat, target_values) else: loss_val = None if not return_dict: return tuple( v for v in [ loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForTimeSeriesClassificationOutput( loss=loss_val, prediction_outputs=y_hat, # tensor [batch_size x n_labels] last_hidden_state=model_output.last_hidden_state, # x: [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, ) @dataclass class PatchTSMixerForRegressionOutput(ModelOutput): """ Output type of [`PatchTSMixerForRegressionOutput`]. Args: regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`): Prediction output from the regression head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. 
""" loss: Optional[torch.FloatTensor] = None regression_outputs: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class InjectScalerStatistics4D(nn.Module): def __init__(self, d_model: int, num_patches: int, expansion: int = 2): super().__init__() self.inverse_trans_expansion = nn.Linear(d_model + 2, expansion * d_model) self.inverse_trans_compression = nn.Linear(expansion * d_model, d_model) self.map_scale_expansion = nn.Linear(2, 2 * expansion) self.map_scale_compression = nn.Linear(2 * expansion, 2) self.num_patches = num_patches def forward(self, inputs: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): """ Args: inputs (`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`) loc (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`) scale (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`) Returns: `torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)` """ mean = loc.transpose(-1, -2) # [batch_size x n_channels x 1 ] mean = mean.unsqueeze(-2) # [batch_size x n_channels x 1 x 1] mean = mean.repeat(1, 1, self.num_patches, 1) # [batch_size x n_channels x num_patch x 1] stdev = scale.transpose(-1, -2) # [batch_size x n_channels x 1 ] stdev = stdev.unsqueeze(-2) # [batch_size x n_channels x 1 x 1] stdev = stdev.repeat(1, 1, self.num_patches, 1) # [batch_size x n_channels x num_patch x 1] concat_stats = torch.cat([mean, stdev], dim=-1) # [batch_size x n_channels x num_patch x 2] concat_stats = self.map_scale_expansion(concat_stats) # [batch_size x n_channels x num_patch x (2*expansion)] concat_stats = self.map_scale_compression(concat_stats) # [batch_size x n_channels x num_patch x 2] inputs = torch.cat([inputs, concat_stats], dim=-1) # [batch_size x channels x num_patch x d_model+2] inputs = self.inverse_trans_expansion(inputs) # [batch_size x channels x num_patch x (expansion*d_model)] inputs = self.inverse_trans_compression(inputs) # [batch_size x channels x num_patch x d_model] return inputs class PatchTSMixerForRegression(PatchTSMixerPreTrainedModel): r""" `PatchTSMixer` for regression application. Args: config (`PatchTSMixerConfig`): Configuration. Returns: `None`. 
""" def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config) self.loss = config.loss self.distribution_output = config.distribution_output self.use_return_dict = config.use_return_dict self.num_parallel_samples = config.num_parallel_samples if config.loss == "mse": self.distribution_output = None else: distribution_output_map = { "student_t": StudentTOutput, "normal": NormalOutput, "negative_binomial": NegativeBinomialOutput, } output_class = distribution_output_map.get(config.distribution_output) if output_class is not None: self.distribution_output = output_class(dim=config.num_targets) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") if config.scaling in ["std", "mean", True]: self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches) else: self.inject_scale = None self.head = PatchTSMixerLinearHead( config=config, distribution_output=self.distribution_output, ) # Initialize weights and apply final processing if config.post_init: self.post_init() @add_start_docstrings_to_model_forward(PATCHTSMIXER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=PatchTSMixerForRegressionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, past_values: torch.Tensor, target_values: torch.Tensor = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForRegressionOutput: r""" target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*): Target values of the time series, that serve as labels for the model. The `target_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT required for a pretraining task. For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel Filtering for both prediction and target will be manually applied before the loss computation. For a classification task, it has a shape of `(batch_size,)`. For a regression task, it has a shape of `(batch_size, num_targets)`. return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. 
Returns: """ if self.loss == "mse": loss = nn.MSELoss(reduction="mean") elif self.loss == "nll": loss = nll else: raise ValueError("Invalid loss function: Allowed values: mse and nll") return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model( past_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # model_output: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) if self.inject_scale is not None: model_output.last_hidden_state = self.inject_scale( model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale, ) # x: [batch_size x nvars x num_patch x d_model] y_hat = self.head(model_output.last_hidden_state) # [batch_size x num_targets] if target_values is not None and return_loss is True: if self.distribution_output: if self.distribution_output == "negative_binomial" and torch.any(target_values < 0): raise Exception("target_values cannot be negative for negative_binomial distribution.") distribution = self.distribution_output.distribution(y_hat) # y_hat should be a 2-tuple, each with dimension [bs, num_targets] y_hat = tuple([item.view(-1, self.config.num_targets) for item in y_hat]) loss_val = loss(distribution, target_values) # take average of the loss loss_val = weighted_average(loss_val) else: loss_val = loss(y_hat, target_values) else: loss_val = None if not return_dict: return tuple( v for v in [ loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForRegressionOutput( loss=loss_val, regression_outputs=y_hat, # tensor [batch_size x num_targets] last_hidden_state=model_output.last_hidden_state, # [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, ) def generate( self, past_values: torch.Tensor, ) -> SamplePatchTSMixerRegressionOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the target values. Return: [`SamplePatchTSMixerRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`. """ # get number of samples num_parallel_samples = self.num_parallel_samples # get model output outputs = self( past_values=past_values, target_values=None, output_hidden_states=False, ) # get distribution distribution = self.distribution_output.distribution(outputs.regression_outputs) # get samples samples = [ distribution.sample() for _ in range(num_parallel_samples) ] # samples: list of [batch_size x num_targets] # stack tensors # [batch_size x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSMixerRegressionOutput(sequences=samples) __all__ = [ "PatchTSMixerPreTrainedModel", "PatchTSMixerModel", "PatchTSMixerForPretraining", "PatchTSMixerForPrediction", "PatchTSMixerForTimeSeriesClassification", "PatchTSMixerForRegression", ]
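

# Minimal end-to-end usage sketch for the forecasting head defined above (illustrative only:
# the config sizes are arbitrary and no pretrained checkpoint is assumed).
#
#     import torch
#     from transformers import PatchTSMixerConfig, PatchTSMixerForPrediction
#
#     config = PatchTSMixerConfig(
#         context_length=64, prediction_length=16, patch_length=8, patch_stride=8, num_input_channels=2
#     )
#     model = PatchTSMixerForPrediction(config)
#     past_values = torch.randn(8, 64, 2)        # [batch x context_length x num_input_channels]
#     outputs = model(past_values=past_values)
#     outputs.prediction_outputs.shape           # torch.Size([8, 16, 2])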
transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py/0
{ "file_path": "transformers/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py", "repo_id": "transformers", "token_count": 38156 }
# coding=utf-8 # Copyright Deepmind and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Perceiver model configuration""" from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging logger = logging.get_logger(__name__) class PerceiverConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate an Perceiver model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Perceiver [deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_latents (`int`, *optional*, defaults to 256): The number of latents. d_latents (`int`, *optional*, defaults to 1280): Dimension of the latent embeddings. d_model (`int`, *optional*, defaults to 768): Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no preprocessor is provided. num_blocks (`int`, *optional*, defaults to 1): Number of blocks in the Transformer encoder. num_self_attends_per_block (`int`, *optional*, defaults to 26): The number of self-attention layers per block. num_self_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each self-attention layer in the Transformer encoder. num_cross_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each cross-attention layer in the Transformer encoder. qk_channels (`int`, *optional*): Dimension to project the queries + keys before applying attention in the cross-attention and self-attention layers of the encoder. Will default to preserving the dimension of the queries if not specified. v_channels (`int`, *optional*): Dimension to project the values before applying attention in the cross-attention and self-attention layers of the encoder. Will default to preserving the dimension of the queries if not specified. cross_attention_shape_for_attention (`str`, *optional*, defaults to `"kv"`): Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder. self_attention_widening_factor (`int`, *optional*, defaults to 1): Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder. cross_attention_widening_factor (`int`, *optional*, defaults to 1): Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder. 
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        use_query_residual (`bool`, *optional*, defaults to `True`):
            Whether to add a query residual in the cross-attention layer of the encoder.
        vocab_size (`int`, *optional*, defaults to 262):
            Vocabulary size for the masked language modeling model.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that the masked language modeling model might ever be used with. Typically set
            this to something large just in case (e.g., 512 or 1024 or 2048).
        image_size (`int`, *optional*, defaults to 56):
            Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
        train_size (`List[int]`, *optional*, defaults to `[368, 496]`):
            Training size of the images for the optical flow model.
        num_frames (`int`, *optional*, defaults to 16):
            Number of video frames used for the multimodal autoencoding model.
        audio_samples_per_frame (`int`, *optional*, defaults to 1920):
            Number of audio samples per frame for the multimodal autoencoding model.
        samples_per_patch (`int`, *optional*, defaults to 16):
            Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
        output_shape (`List[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
            Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the
            multimodal autoencoding model. This excludes the channel dimension.
        output_num_channels (`int`, *optional*, defaults to 512):
            Number of output channels for each modality decoder.
Example: ```python >>> from transformers import PerceiverModel, PerceiverConfig >>> # Initializing a Perceiver deepmind/language-perceiver style configuration >>> configuration = PerceiverConfig() >>> # Initializing a model from the deepmind/language-perceiver style configuration >>> model = PerceiverModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "perceiver" def __init__( self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], output_num_channels=512, _label_trainable_num_channels=1024, **kwargs, ): super().__init__(**kwargs) self.num_latents = num_latents self.d_latents = d_latents self.d_model = d_model self.num_blocks = num_blocks self.num_self_attends_per_block = num_self_attends_per_block self.num_self_attention_heads = num_self_attention_heads self.num_cross_attention_heads = num_cross_attention_heads self.qk_channels = qk_channels self.v_channels = v_channels self.cross_attention_shape_for_attention = cross_attention_shape_for_attention self.self_attention_widening_factor = self_attention_widening_factor self.cross_attention_widening_factor = cross_attention_widening_factor self.hidden_act = hidden_act self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_query_residual = use_query_residual # masked language modeling attributes self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings # image classification attributes self.image_size = image_size # flow attributes self.train_size = train_size # multimodal autoencoding attributes self.num_frames = num_frames self.audio_samples_per_frame = audio_samples_per_frame self.samples_per_patch = samples_per_patch self.output_shape = output_shape self.output_num_channels = output_num_channels self._label_trainable_num_channels = _label_trainable_num_channels class PerceiverOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def atol_for_validation(self) -> float: return 1e-4 def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, ) -> Mapping[str, Any]: # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(preprocessor, PreTrainedTokenizerBase): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, 
                num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] * seq_length)] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )


__all__ = ["PerceiverConfig", "PerceiverOnnxConfig"]
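

# Minimal usage sketch for the ONNX config above (illustrative only; the reduced latent sizes
# are arbitrary assumptions, and the checkpoint id is the one referenced in the class docstring).
#
#     from transformers import PerceiverConfig, PerceiverTokenizer, TensorType
#
#     config = PerceiverConfig(num_latents=64, d_latents=256)
#     onnx_config = PerceiverOnnxConfig(config)
#     tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     sorted(dummy.keys())                      # ["attention_mask", "inputs"]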
transformers/src/transformers/models/perceiver/configuration_perceiver.py/0
{ "file_path": "transformers/src/transformers/models/perceiver/configuration_perceiver.py", "repo_id": "transformers", "token_count": 4659 }
# coding=utf-8 # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Phi-3 model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Phi3Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32064): Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Phi3Model`]. hidden_size (`int`, *optional*, defaults to 3072): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 8192): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. resid_pdrop (`float`, *optional*, defaults to 0.0): Dropout probability for mlp outputs. embd_pdrop (`int`, *optional*, defaults to 0.0): The dropout ratio for the embeddings. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio after computing the attention scores. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. original_max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model was trained with. This is used to determine the size of the original RoPE embeddings when using long scaling. 
initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings or not.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
            the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
            divided by the number of attention heads divided by 2.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.

    Example:

    ```python
    >>> from transformers import Phi3Model, Phi3Config

    >>> # Initializing a Phi-3 style configuration
    >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "phi3"
    keys_to_ignore_at_inference = ["past_key_values"]

    base_model_tp_plan = {
        "layers.*.self_attn.qkv_proj": "colwise_rep",  # we need to replicate here due to the slicing of qkv
        "layers.*.self_attn.o_proj": "rowwise_rep",  # we need to replicate here due to the slicing of qkv
        "layers.*.mlp.gate_up_proj": "colwise_rep",  # we need to replicate here due to the `chunk` operation
        "layers.*.mlp.down_proj": "rowwise_rep",  # we need to replicate here due to the `chunk` operation
    }

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling =
rope_scaling self._rope_scaling_adjustment() self._rope_scaling_validation() self.sliding_window = sliding_window super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) def _rope_scaling_adjustment(self): """ Adjust the `type` of the `rope_scaling` configuration for backward compatibility. """ if self.rope_scaling is None: return rope_scaling_type = self.rope_scaling.get("type", None) # For backward compatibility if previous version used "su" or "yarn" if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]: self.rope_scaling["type"] = "longrope" def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. """ if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3: raise ValueError( "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, " f"got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_short_factor = self.rope_scaling.get("short_factor", None) rope_scaling_long_factor = self.rope_scaling.get("long_factor", None) if rope_scaling_type is None or rope_scaling_type not in ["longrope"]: raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}") if not ( isinstance(rope_scaling_short_factor, list) and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor) ): raise ValueError( f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}" ) if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2: raise ValueError( f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}" ) if not ( isinstance(rope_scaling_long_factor, list) and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor) ): raise ValueError( f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}" ) if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2: raise ValueError( f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}" ) __all__ = ["Phi3Config"]
transformers/src/transformers/models/phi3/configuration_phi3.py/0
{ "file_path": "transformers/src/transformers/models/phi3/configuration_phi3.py", "repo_id": "transformers", "token_count": 4499 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import regex as re import torch from mistral_common.tokens.tokenizers.mistral import MistralTokenizer from safetensors.torch import load_file as safe_load_file from transformers import ( LlavaConfig, LlavaForConditionalGeneration, MistralConfig, PixtralImageProcessor, PixtralProcessor, PixtralVisionConfig, ) """ # Here is how to get the original tokens! model_name = "mistralai/Pixtral-12B-2409" tok = MistralTokenizer.from_model(model_name) from mistral_common.protocol.instruct.request import ChatCompletionRequest, UserMessage, ImageChunk, TextChunk EXPECTED_TOKENS = tok.encode_chat_completion( ChatCompletionRequest( messages=[ UserMessage( content=[ TextChunk(text="Describe the images"), ] + [ImageChunk(image=img) for img in IMG_URLS] ) ], model="pixtral", ) ) assert tokenizer.decode(inputs["input_ids"][0]) == EXPECTED_TOKENS """ OLD_KEY_TO_NEW_KEY_MAPPING = { # Layer Normalization Weights r"vision_encoder.transformer.layers.(\d+).input_layernorm.weight": r"vision_tower.transformer.layers.\1.attention_norm.weight", r"vision_encoder.transformer.layers.(\d+).ffn_norm.weight": r"vision_tower.transformer.layers.\1.ffn_norm.weight", # Self Attention Projections r"vision_encoder.transformer.layers.(\d+).attention.wq.weight": r"vision_tower.transformer.layers.\1.attention.q_proj.weight", r"vision_encoder.transformer.layers.(\d+).attention.wk.weight": r"vision_tower.transformer.layers.\1.attention.k_proj.weight", r"vision_encoder.transformer.layers.(\d+).attention.wv.weight": r"vision_tower.transformer.layers.\1.attention.v_proj.weight", r"vision_encoder.transformer.layers.(\d+).attention.wo.weight": r"vision_tower.transformer.layers.\1.attention.o_proj.weight", # MLP Projections r"vision_encoder.transformer.layers.(\d+).feed_forward.w1.weight": r"vision_tower.transformer.layers.\1.feed_forward.gate_proj.weight", r"vision_encoder.transformer.layers.(\d+).feed_forward.w2.weight": r"vision_tower.transformer.layers.\1.feed_forward.down_proj.weight", r"vision_encoder.transformer.layers.(\d+).feed_forward.w3.weight": r"vision_tower.transformer.layers.\1.feed_forward.up_proj.weight", # Additional mappings r"vision_encoder": r"vision_tower", r"vision_language_adapter.w_in": r"multi_modal_projector.linear_1", r"vision_language_adapter.w_out": r"multi_modal_projector.linear_2", r"layers.(\d+).attention.wq.weight": r"language_model.model.layers.\1.self_attn.q_proj.weight", r"layers.(\d+).attention.wk.weight": r"language_model.model.layers.\1.self_attn.k_proj.weight", r"layers.(\d+).attention.wv.weight": r"language_model.model.layers.\1.self_attn.v_proj.weight", r"layers.(\d+).attention.wo.weight": r"language_model.model.layers.\1.self_attn.o_proj.weight", r"layers.(\d+).feed_forward.w1.weight": r"language_model.model.layers.\1.mlp.gate_proj.weight", r"layers.(\d+).feed_forward.w2.weight": r"language_model.model.layers.\1.mlp.down_proj.weight", 
r"layers.(\d+).feed_forward.w3.weight": r"language_model.model.layers.\1.mlp.up_proj.weight", r"layers.(\d+).ffn_norm.weight": r"language_model.model.layers.\1.post_attention_layernorm.weight", r"layers.(\d+).attention_norm.weight": r"language_model.model.layers.\1.input_layernorm.weight", r"tok_embeddings.weight": r"language_model.model.embed_tokens.weight", r"output.weight": r"language_model.lm_head.weight", r"norm.weight": r"language_model.model.norm.weight", } def convert_mistral_tokenizer(model_file): from transformers import LlamaTokenizer mistral_tokenizer = MistralTokenizer.from_file(model_file) vocab = mistral_tokenizer.instruct_tokenizer.tokenizer.vocab() control_token_ids = mistral_tokenizer.instruct_tokenizer.tokenizer._control_tokens all_special = [vocab[id] for id in control_token_ids] hf_tokenizer = LlamaTokenizer(model_file) # Do I need to exclude tokens that are already special? hf_tokenizer.add_special_tokens({"additional_special_tokens": all_special}) hf_tokenizer.model_input_names = ["input_ids", "attention_mask"] return hf_tokenizer def permute_for_rope(value, n_heads, config): dim1 = value.shape[0] dim2 = config.hidden_size return value.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) def convert_dictionary(original_state_dict, vision_config, text_config): new_dict = {} all_keys = "\n" + "\n".join(original_state_dict.keys()) old_keys = all_keys for old, new in OLD_KEY_TO_NEW_KEY_MAPPING.items(): all_keys = re.sub(r"\n" + old, r"\n" + new, all_keys) OLD_TO_NEW = dict(zip(old_keys.split("\n"), all_keys.split("\n"))) for key, value in original_state_dict.items(): new_key = OLD_TO_NEW[key] if "vision_encoder" in key: _config = vision_config num_attention_heads = _config.num_attention_heads else: _config = text_config if "q_proj" in new_key: num_attention_heads = _config.num_attention_heads if "k_proj" in new_key: num_attention_heads = _config.num_key_value_heads if "q_proj" in new_key or "k_proj" in new_key: value = permute_for_rope(value, num_attention_heads, _config) new_dict[new_key] = value return new_dict MISTRAL_CONFIG_MAPPING = { "dim": "hidden_size", "hidden_dim": "intermediate_size", "n_kv_heads": "num_key_value_heads", "n_heads": "num_attention_heads", "n_layers": "num_hidden_layers", } def convert_mistral_model(input_dir, output_dir): vision_config = {} if os.path.isfile(f"{input_dir}/params.json"): with open(f"{input_dir}/params.json") as f: param_json = json.load(f) vision_config = param_json.pop("vision_encoder") for k, v in MISTRAL_CONFIG_MAPPING.items(): value = param_json.pop(k) param_json[v] = value if "hidden_act" not in vision_config: vision_config["hidden_act"] = "silu" text_config = MistralConfig( **param_json, hidden_act="silu", sliding_window=None, tie_word_embeddings=False, is_composition=True, rms_norm_eps=1e-5, ) else: text_config = MistralConfig( attention_dropout=0.0, bos_token_id=1, eos_token_id=2, head_dim=128, hidden_act="silu", hidden_size=5120, initializer_range=0.02, intermediate_size=14336, max_position_embeddings=1024000, model_type="mistral", num_attention_heads=32, num_hidden_layers=40, num_key_value_heads=8, rms_norm_eps=1e-05, rope_theta=1000000000.0, sliding_window=None, tie_word_embeddings=False, vocab_size=131072, ) adapter_bias = vision_config.pop("adapter_bias", True) vision_config = PixtralVisionConfig(**vision_config) config = LlavaConfig( vision_config, text_config, vision_feature_layer=-1, image_token_index=10, vision_feature_select_strategy="full", image_seq_length=1, 
multimodal_projector_bias=adapter_bias, ) config.architectures = ["LlavaForConditionalGeneration"] config.save_pretrained(output_dir) full_original_state_dict = {} safetensors_files = sorted([file for file in os.listdir(input_dir) if file.endswith(".safetensors")]) if len(safetensors_files) == 1: full_original_state_dict = safe_load_file(f"{input_dir}/consolidated.safetensors") else: for file in safetensors_files: loaded_dict = safe_load_file(f"{input_dir}/{file}") full_original_state_dict.update(loaded_dict) new_dict = convert_dictionary(full_original_state_dict, vision_config, text_config) with torch.device("meta"): model = LlavaForConditionalGeneration(config) model.load_state_dict(new_dict, strict=True, assign=True) model.save_pretrained(output_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", required=True, ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", required=True, ) parser.add_argument( "--tokenizer_file", help="Location of the specific tokenizer model file to use.", required=True ) parser.add_argument( "--chat_template_file", help="Optional file containing a raw chat template. Will be set as the processor's chat template.", required=False, ) args = parser.parse_args() convert_mistral_model(args.input_dir, args.output_dir) tokenizer = convert_mistral_tokenizer(args.tokenizer_file) image_processor = PixtralImageProcessor() processor = PixtralProcessor(tokenizer=tokenizer, image_processor=image_processor, image_token="[IMG]") if args.chat_template_file: processor.chat_template = open(args.chat_template_file).read() processor.save_pretrained(args.output_dir) if __name__ == "__main__": main()
transformers/src/transformers/models/pixtral/convert_pixtral_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/pixtral/convert_pixtral_weights_to_hf.py", "repo_id": "transformers", "token_count": 4329 }
# coding=utf-8 # Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, # Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch PVT model.""" import collections import math from typing import Iterable, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_pvt import PvtConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "PvtConfig" _CHECKPOINT_FOR_DOC = "Zetatech/pvt-tiny-224" _EXPECTED_OUTPUT_SHAPE = [1, 50, 512] _IMAGE_CLASS_CHECKPOINT = "Zetatech/pvt-tiny-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Pvt class PvtDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class PvtPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__( self, config: PvtConfig, image_size: Union[int, Iterable[int]], patch_size: Union[int, Iterable[int]], stride: int, num_channels: int, hidden_size: int, cls_token: bool = False, ): super().__init__() self.config = config image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.position_embeddings = nn.Parameter( torch.randn(1, num_patches + 1 if cls_token else num_patches, hidden_size) ) self.cls_token = nn.Parameter(torch.zeros(1, 1, hidden_size)) if cls_token else None self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=stride, stride=patch_size) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(p=config.hidden_dropout_prob) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = height * width # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == self.config.image_size * self.config.image_size: return self.position_embeddings embeddings = embeddings.reshape(1, height, width, -1).permute(0, 3, 1, 2) interpolated_embeddings = F.interpolate(embeddings, size=(height, width), mode="bilinear") interpolated_embeddings = interpolated_embeddings.reshape(1, -1, height * width).permute(0, 2, 1) return interpolated_embeddings def forward(self, pixel_values: torch.Tensor) -> Tuple[torch.Tensor, int, int]: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) patch_embed = self.projection(pixel_values) *_, height, width = patch_embed.shape patch_embed = patch_embed.flatten(2).transpose(1, 2) embeddings = self.layer_norm(patch_embed) if self.cls_token is not None: cls_token = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_token, embeddings), dim=1) position_embeddings = self.interpolate_pos_encoding(self.position_embeddings[:, 1:], height, width) position_embeddings = torch.cat((self.position_embeddings[:, :1], position_embeddings), dim=1) else: position_embeddings = self.interpolate_pos_encoding(self.position_embeddings, height, width) embeddings = self.dropout(embeddings + position_embeddings) return embeddings, height, width class PvtSelfOutput(nn.Module): def __init__(self, config: PvtConfig, hidden_size: int): super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class PvtEfficientSelfAttention(nn.Module): """Efficient self-attention mechanism with reduction of the sequence [PvT paper](https://arxiv.org/abs/2102.12122).""" def __init__( self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float ): super().__init__() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.sequences_reduction_ratio = sequences_reduction_ratio if sequences_reduction_ratio > 1: self.sequence_reduction = nn.Conv2d( hidden_size, hidden_size, kernel_size=sequences_reduction_ratio, stride=sequences_reduction_ratio ) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def transpose_for_scores(self, hidden_states: int) -> torch.Tensor: new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) hidden_states = hidden_states.view(new_shape) return hidden_states.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False, ) -> Tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.sequences_reduction_ratio > 1: batch_size, seq_len, num_channels = hidden_states.shape # Reshape to (batch_size, num_channels, height, width) hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) # Apply sequence reduction hidden_states = self.sequence_reduction(hidden_states) # Reshape back to (batch_size, seq_len, num_channels) hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Take the dot product between "query" 
and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class PvtAttention(nn.Module): def __init__( self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float ): super().__init__() self.self = PvtEfficientSelfAttention( config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequences_reduction_ratio=sequences_reduction_ratio, ) self.output = PvtSelfOutput(config, hidden_size=hidden_size) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class PvtFFN(nn.Module): def __init__( self, config: PvtConfig, in_features: int, hidden_features: Optional[int] = None, out_features: Optional[int] = None, ): super().__init__() out_features = out_features if out_features is not None else in_features self.dense1 = nn.Linear(in_features, hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class PvtLayer(nn.Module): def __init__( self, config: PvtConfig, hidden_size: int, num_attention_heads: int, drop_path: float, sequences_reduction_ratio: float, mlp_ratio: float, ): super().__init__() self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attention = PvtAttention( 
config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequences_reduction_ratio=sequences_reduction_ratio, ) self.drop_path = PvtDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = PvtFFN(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False): self_attention_outputs = self.attention( hidden_states=self.layer_norm_1(hidden_states), height=height, width=width, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states)) mlp_output = self.drop_path(mlp_output) layer_output = hidden_states + mlp_output outputs = (layer_output,) + outputs return outputs class PvtEncoder(nn.Module): def __init__(self, config: PvtConfig): super().__init__() self.config = config # stochastic depth decay rule drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths)).tolist() # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( PvtPatchEmbeddings( config=config, image_size=config.image_size if i == 0 else self.config.image_size // (2 ** (i + 1)), patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], cls_token=i == config.num_encoder_blocks - 1, ) ) self.patch_embeddings = nn.ModuleList(embeddings) # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( PvtLayer( config=config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[cur + j], sequences_reduction_ratio=config.sequence_reduction_ratios[i], mlp_ratio=config.mlp_ratios[i], ) ) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) # Layer norms self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] num_blocks = len(self.block) hidden_states = pixel_values for idx, (embedding_layer, block_layer) in enumerate(zip(self.patch_embeddings, self.block)): # first, obtain patch embeddings hidden_states, height, width = embedding_layer(hidden_states) # second, send embeddings through blocks for block in block_layer: layer_outputs = block(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if idx != num_blocks - 1: hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() hidden_states = self.layer_norm(hidden_states) if 
output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class PvtPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = PvtConfig base_model_prefix = "pvt" main_input_name = "pixel_values" _no_split_modules = [] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, nn.Linear): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PvtPatchEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data, mean=0.0, std=self.config.initializer_range, ) if module.cls_token is not None: module.cls_token.data = nn.init.trunc_normal_( module.cls_token.data, mean=0.0, std=self.config.initializer_range, ) PVT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`~PvtConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PVT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PvtImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Pvt encoder outputting raw hidden-states without any specific head on top.", PVT_START_DOCSTRING, ) class PvtModel(PvtPreTrainedModel): def __init__(self, config: PvtConfig): super().__init__(config) self.config = config # hierarchical Transformer encoder self.encoder = PvtEncoder(config) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(PVT_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """ Pvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. """, PVT_START_DOCSTRING, ) class PvtForImageClassification(PvtPreTrainedModel): def __init__(self, config: PvtConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.pvt = PvtModel(config) # Classifier head self.classifier = ( nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PVT_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)")) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.Tensor], labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.pvt( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["PvtForImageClassification", "PvtModel", "PvtPreTrainedModel"]
transformers/src/transformers/models/pvt/modeling_pvt.py/0
{ "file_path": "transformers/src/transformers/models/pvt/modeling_pvt.py", "repo_id": "transformers", "token_count": 12236 }
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RAG model implementation.""" import copy from dataclasses import dataclass from typing import Callable, List, Optional, Tuple, Union import torch from torch import nn from ...configuration_utils import PretrainedConfig from ...generation import BeamSearchScorer, GenerationConfig, LogitsProcessorList, StoppingCriteriaList from ...modeling_outputs import ModelOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "RagConfig" @dataclass class RetrievAugLMMarginOutput(ModelOutput): """ Base class for retriever augmented marginalized models outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. 
question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None doc_scores: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None retrieved_doc_embeds: Optional[torch.FloatTensor] = None retrieved_doc_ids: Optional[torch.LongTensor] = None context_input_ids: Optional[torch.LongTensor] = None context_attention_mask: Optional[torch.LongTensor] = None question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class RetrievAugLMOutput(ModelOutput): """ Args: logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
""" logits: torch.FloatTensor = None doc_scores: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None retrieved_doc_embeds: Optional[torch.FloatTensor] = None retrieved_doc_ids: Optional[torch.LongTensor] = None context_input_ids: Optional[torch.LongTensor] = None context_attention_mask: Optional[torch.LongTensor] = None question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class RagPreTrainedModel(PreTrainedModel): r""" RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a generator, the encoder and generator are trainable while the retriever is just an indexed dataset. """ config_class = RagConfig base_model_prefix = "rag" _supports_flash_attn_2 = True _supports_sdpa = True @classmethod def from_pretrained(cls, *args, **kwargs): # At the moment fast initialization is not supported # for composite models kwargs["_fast_init"] = False return super().from_pretrained(*args, **kwargs) @classmethod def from_pretrained_question_encoder_generator( cls, question_encoder_pretrained_model_name_or_path: str = None, generator_pretrained_model_name_or_path: str = None, retriever: RagRetriever = None, **kwargs, ) -> PreTrainedModel: r""" Instantiates an question encoder and a generator from one or two base classes of the library from pretrained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the question encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the generator. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. 
                    - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            retriever ([`RagRetriever`], *optional*):
                The retriever to use.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initiate the model
                (e.g., `output_attentions=True`).

                - To update the question_encoder configuration, use the prefix *question_encoder_* for each
                  configuration parameter.
                - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
                - To update the parent model configuration, do not use a prefix for each configuration parameter.

                Behaves differently depending on whether a `config` is provided or automatically loaded.

        Example:

        ```python
        >>> from transformers import RagModel

        >>> # initialize a RAG from two pretrained models.
        >>> model = RagModel.from_pretrained_question_encoder_generator(
        ...     "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
        ... )
        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./rag")
        >>> # load fine-tuned model
        >>> model = RagModel.from_pretrained("./rag")
        ```"""

        kwargs_question_encoder = {
            argument[len("question_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("question_encoder_")
        }

        kwargs_generator = {
            argument[len("generator_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("generator_")
        }

        # remove question_encoder, generator kwargs from kwargs
        for key in kwargs_question_encoder.keys():
            del kwargs["question_encoder_" + key]
        for key in kwargs_generator.keys():
            del kwargs["generator_" + key]

        # Load and initialize the question_encoder and generator
        # The distinction between question_encoder and generator at the model level is made
        # by the value of the flag `is_generator` that we need to set correctly.
question_encoder = kwargs_question_encoder.pop("model", None) if question_encoder is None: assert question_encoder_pretrained_model_name_or_path is not None, ( "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to" " be defined" ) from ..auto.modeling_auto import AutoModel if "config" not in kwargs_question_encoder: from ..auto.configuration_auto import AutoConfig question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained( question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder, return_unused_kwargs=True, ) kwargs_question_encoder["config"] = question_encoder_config question_encoder = AutoModel.from_pretrained( question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder ) generator = kwargs_generator.pop("model", None) if generator is None: assert generator_pretrained_model_name_or_path is not None, ( "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has" " to be defined" ) from ..auto.modeling_auto import AutoModelForSeq2SeqLM if "config" not in kwargs_generator: from ..auto.configuration_auto import AutoConfig generator_config, kwargs_generator = AutoConfig.from_pretrained( generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True ) kwargs_generator["config"] = generator_config generator = AutoModelForSeq2SeqLM.from_pretrained( generator_pretrained_model_name_or_path, **kwargs_generator ) # instantiate config with corresponding kwargs config = kwargs.get("config", None) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever) RAG_START_DOCSTRING = r""" RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator. The question encoder can be any *autoencoding* model, preferably [`DPRQuestionEncoder`], and the generator can be any *seq2seq* model, preferably [`BartForConditionalGeneration`]. The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`. It has been tested with [`DPRQuestionEncoder`] as the `question_encoder` and [`BartForConditionalGeneration`] or [`T5ForConditionalGeneration`] as the `generator`. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Args: config ([`RagConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. question_encoder ([`PreTrainedModel`]): An encoder model compatible with the faiss index encapsulated by the `retriever`. generator ([`PreTrainedModel`]): A seq2seq model used as the generator in the RAG architecture. retriever ([`RagRetriever`]): A retriever class encapsulating a faiss index queried to obtain context documents for current inputs. """ RAG_FORWARD_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*) Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`, *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs * sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the generator's encoder. Used by the ([`RagModel`]) model during decoding. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for generation tasks. `None` by default, construct as per instructions for the generator model you're using with your RAG instance. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. past_key_values (`tuple(tuple(torch.FloatTensor))`): Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used in the ([`RagTokenForGeneration`]) model during decoding. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores` has to be provided to the forward pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model was not initialized with a `retriever` ``context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. 
If the model has is not initialized with a `retriever` `context_attention_mask` has to be provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`]. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_retrieved(`bool`, *optional*): Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask`. See returned tensors for more detail. n_docs (`int`, *optional*, defaults to `config.n_docs``) Number of documents to retrieve and/or number of documents for which to generate an answer. """ @add_start_docstrings_to_model_forward(RAG_START_DOCSTRING) class RagModel(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, # or maybe just use a `set_retriever(...)` method **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an question_encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) else: assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" super().__init__(config) if question_encoder is None: from ..auto.modeling_auto import AutoModel question_encoder = AutoModel.from_config(config.question_encoder) if generator is None: from ..auto.modeling_auto import AutoModelForSeq2SeqLM generator = AutoModelForSeq2SeqLM.from_config(config.generator) self.retriever = retriever if self.retriever is not None: assert isinstance( retriever, RagRetriever ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" self.retriever = retriever self.question_encoder = question_encoder self.generator = generator self.ctx_encoder = None self.context_encoder_training = False @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, doc_scores: Optional[torch.FloatTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, n_docs: Optional[int] = None, ) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagModel >>> import torch >>> tokenizer = 
AutoTokenizer.from_pretrained("facebook/rag-token-base") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"]) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved # whether retriever has to be used has_to_retrieve = ( self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and encoder_outputs is None ) # encoder_outputs are pre-computed during RAG-token generation if encoder_outputs is None: if has_to_retrieve: question_enc_outputs = self.question_encoder( input_ids, attention_mask=attention_mask, return_dict=True ) question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder retriever_outputs = self.retriever( input_ids, question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", ) if self.context_encoder_training: ( context_input_ids, context_attention_mask, retrieved_doc_embeds, retrived_doc_input_ids, retrived_doc_attention_mask, retrieved_doc_ids, ) = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["tokenized_doc_ids"], retriever_outputs["tokenized_doc_attention_mask"], retriever_outputs["doc_ids"], ) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) retrived_doc_input_ids = retrived_doc_input_ids.to(input_ids) retrived_doc_attention_mask = retrived_doc_attention_mask.to(input_ids) retrieved_doc_embeds = self.ctx_encoder( retrived_doc_input_ids, attention_mask=retrived_doc_attention_mask, return_dict=True ).pooler_output retrieved_doc_embeds = retrieved_doc_embeds.view( -1, n_docs, question_encoder_last_hidden_state.shape[1] ) # reshaping # compute doc_scores involving ctx_encoder doc_scores = torch.bmm( question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) ).squeeze(1) else: context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["doc_ids"], ) # set to correct device retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm( question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) ).squeeze(1) else: assert context_input_ids is not None, ( "Make sure that `context_input_ids` are passed, if no `retriever` is set. 
Alternatively, you can" " set a retriever using the `set_retriever(...)` function." ) assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." ) assert ( doc_scores is not None ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." assert (doc_scores.shape[1] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." ) # Decoder input without context documents if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0) gen_outputs = self.generator( input_ids=context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=True, ) if not has_to_retrieve: question_encoder_last_hidden_state = None question_enc_hidden_states = None question_enc_attentions = None retrieved_doc_embeds = None retrieved_doc_ids = None else: question_enc_hidden_states = question_enc_outputs.hidden_states question_enc_attentions = question_enc_outputs.attentions if not has_to_retrieve or not output_retrieved: # don't output retrieved docs context_input_ids = (None,) context_attention_mask = None retrieved_doc_embeds = None retrieved_doc_ids = None return RetrievAugLMOutput( logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, generator_cross_attentions=gen_outputs.cross_attentions, ) @add_start_docstrings_to_model_forward( """ A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass. """, RAG_START_DOCSTRING, ) class RagSequenceForGeneration(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." 
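        # Note: this class wraps a RagModel and adds RAG-sequence ("thorough") scoring. The generator's
        # per-document sequence log-probabilities are combined with the document scores and marginalized
        # over documents at the sequence level in `get_nll` below, in contrast to the per-token
        # marginalization performed by RagTokenForGeneration.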
if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel): self.rag.context_encoder_training = True self.rag.ctx_encoder = ctx_encoder @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, exclude_bos_score: Optional[bool] = None, reduce_loss: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, n_docs: Optional[int] = None, **kwargs, # needs kwargs for generation ) -> RetrievAugLMMarginOutput: r""" exclude_bos_score (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing the loss. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> labels = targets["input_ids"] >>> outputs = model(input_ids=input_ids, labels=labels) >>> # or use retriever separately >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True) >>> # 1. Encode >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") >>> doc_scores = torch.bmm( ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ... ).squeeze(1) >>> # 3. Forward to generator >>> outputs = model( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=labels, ... 
) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, ) loss = None if labels is not None: loss = self.get_nll( outputs.logits, outputs.doc_scores, decoder_input_ids, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, exclude_bos_score=exclude_bos_score, n_docs=n_docs, ) return RetrievAugLMMarginOutput( loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions, ) @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, do_deduplication: Optional[bool] = None, # defaults to True num_return_sequences: Optional[int] = None, # defaults to 1 num_beams: Optional[int] = None, # defaults to 1 n_docs: Optional[int] = None, **model_kwargs, ) -> torch.LongTensor: """ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation for more information on how to set other generate input parameters. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. do_deduplication (`bool`, *optional*): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. num_return_sequences(`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the `generator`'s `[`~generation.GenerationMixin.generate`]` function, where we set `num_return_sequences` to `num_beams`. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. n_docs (`int`, *optional*, defaults to `config.n_docs`) Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs (`Dict[str, Any]`, *optional*): Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]. Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
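
        Example (an illustrative sketch; the checkpoint, dummy index and question are placeholders):

        ```python
        >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
        ... )
        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

        >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
        >>> generated = model.generate(input_ids=inputs["input_ids"], num_beams=2, num_return_sequences=1)
        >>> answers = tokenizer.batch_decode(generated, skip_special_tokens=True)
        ```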
""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication num_doc_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) num_beams = num_beams if num_beams is not None else self.config.num_beams assert ( input_ids is not None or context_input_ids is not None ), " At least one of input_ids or context_input_ids must be given" if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] context_input_ids = self.retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", )["context_input_ids"] # set to correct device context_input_ids = context_input_ids.to(input_ids) hypos = [] model_kwargs["num_beams"] = num_beams model_kwargs["num_return_sequences"] = num_beams model_kwargs["attention_mask"] = None batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs for index in range(batch_size): # first, generate beams from documents: generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) output_sequences = self.generator.generate( generator_input_ids, **model_kwargs, ) # n_docs * n_beam, tgt_len if do_deduplication: # do_deduplication, max_output_len output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values())) num_candidates = output_sequences.shape[ 0 ] # after deduplication, this number can be less than n_docs*n_beam # then, run model forwards to get nll scores: if input_ids is not None: new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1) outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) else: # input_ids is None, need context_input_ids/mask and doc_scores assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." 
) individual_input_ids = generator_input_ids.repeat( num_candidates, 1 ) # (num_candidates*n_docs, max_len) individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1) individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs] outputs = self( context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True, ) top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1] # add hypothesis hypos.append(output_sequences[top_cand_inds]) return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left target = torch.cat( [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 ) n_docs = n_docs if n_docs is not None else self.config.n_docs # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all() def _mask_pads(ll, smooth_obj): pad_mask = target.eq(self.config.generator.pad_token_id) if pad_mask.any(): ll.masked_fill_(pad_mask, 0.0) smooth_obj.masked_fill_(pad_mask, 0.0) return ll.squeeze(-1), smooth_obj.squeeze(-1) # seq_logits dim = (batch*n_docs, tgt_len , #vocabs) seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) ) # batch_size x n_docs x tgt_len x #vocab_size doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1) # RAG-sequence marginalization first_token_scores = seq_logprobs[:, :, :1, :] second_token_scores = seq_logprobs[:, :, 1:2, :] remainder = seq_logprobs[:, :, 2:, :] rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2) # calculate loss target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1) assert target.dim() == rag_logprobs.dim() ll = rag_logprobs.gather(dim=-1, index=target) smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) # sum over tokens, exclude bos while scoring ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2) smooth_obj = smooth_obj.sum(2) ll = ll.logsumexp(1) # logsumexp over docs smooth_obj = smooth_obj.logsumexp(1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss @staticmethod def _cat_and_pad(tensors, pad_token_id): output = ( tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id) ) ind = 0 for t in tensors: output[ind : ind + t.shape[0], : t.shape[1]] = t ind += t.shape[0] return output @add_start_docstrings_to_model_forward( """ A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass. 
""", RAG_START_DOCSTRING, ) class RagTokenForGeneration(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( question_encoder is not None and generator is not None ), "Either a configuration or an encoder and a generator has to be provided." if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel): self.rag.context_encoder_training = True self.rag.ctx_encoder = ctx_encoder def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, doc_scores=None, n_docs=None, **kwargs, ): # Overwritten -- `do_marginalize` is explicitly set in the output if past_key_values is not None: # if past is defined use only last decoder_input_ids decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, "encoder_outputs": encoder_outputs, "doc_scores": doc_scores, "context_attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "past_key_values": past_key_values, "use_cache": use_cache, "do_marginalize": True, "n_docs": n_docs, } @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @staticmethod def _reorder_cache(past_key_values, beam_idx): """Reorders cache for generation. 
BART-inspired but we need to take care of the extra dimension for docs""" def _reorder_stacked(hidden_states, new_order): n_docs = hidden_states.shape[0] // new_order.shape[0] hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:]) hidden_states = hidden_states.index_select(0, new_order) result = hidden_states.view(-1, *hidden_states.shape[2:]) return result reordered_past = () for layer_past in past_key_values: # get the correct batch idx from decoder layer's batch dim for cross and self-attn reordered_past += ( tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past def marginalize(self, seq_logits, doc_scores, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # RAG-token marginalization seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) ) doc_logprobs = torch.log_softmax(doc_scores, dim=1) log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1) return torch.logsumexp(log_prob_sum, dim=1) @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, do_marginalize: Optional[bool] = None, reduce_loss: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, n_docs: Optional[int] = None, **kwargs, # needs kwargs for generation ) -> RetrievAugLMMarginOutput: r""" do_marginalize (`bool`, *optional*): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum` operation. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> labels = targets["input_ids"] >>> outputs = model(input_ids=input_ids, labels=labels) >>> # or use retriever separately >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True) >>> # 1. 
Encode >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") >>> doc_scores = torch.bmm( ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ... ).squeeze(1) >>> # 3. Forward to generator >>> outputs = model( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=labels, ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... ) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, ) loss = None logits = outputs.logits if labels is not None: assert decoder_input_ids is not None loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) if do_marginalize: logits = self.marginalize(logits, outputs.doc_scores, n_docs) return RetrievAugLMMarginOutput( loss=loss, logits=logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions, ) @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, n_docs: Optional[int] = None, generation_config: Optional[GenerationConfig] = None, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None, logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(), stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(), **kwargs, ) -> 
torch.LongTensor: """ Implements RAG token decoding. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. n_docs (`int`, *optional*, defaults to `config.n_docs`) Number of documents to retrieve and/or number of documents for which to generate an answer. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments `inputs_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens `inputs_ids` and the batch ID `batch_id`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config an error is thrown. 
stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a model's config. If a stopping criteria is passed that is already created with the arguments or a model's config an error is thrown. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. """ # Handle `generation_config` and kwargs that might update it if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None self._prepare_special_tokens(generation_config, kwargs_has_attention_mask) # set default parameters n_docs = n_docs if n_docs is not None else self.config.n_docs # retrieve docs if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] out = self.retriever( input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) # set to correct device retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( 1 ) assert (context_input_ids.shape[0] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." 
) # batch_size batch_size = context_input_ids.shape[0] // n_docs encoder = self.rag.generator.get_encoder() encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True) input_ids = torch.full( (batch_size * generation_config.num_beams, 1), generation_config.decoder_start_token_id, dtype=torch.long, device=next(self.parameters()).device, ) input_ids_seq_length = input_ids.shape[-1] last_hidden_state = encoder_outputs["last_hidden_state"] def extend_enc_output(tensor, num_beams=None): # split into `batch_size`, `num_beams`, `num_docs` tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:]) # repeat same last hidden states over `num_beams` dimension tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:]) # merge `batch_size`, `num_beams`, `num_docs` dims again return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:]) # correctly extend last_hidden_state and attention mask context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) encoder_outputs["last_hidden_state"] = extend_enc_output( last_hidden_state, num_beams=generation_config.num_beams ) doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0) # define start_len & additional parameters model_kwargs["doc_scores"] = doc_scores model_kwargs["encoder_outputs"] = encoder_outputs model_kwargs["attention_mask"] = context_attention_mask model_kwargs["n_docs"] = n_docs pre_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=context_input_ids, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, logits_processor=logits_processor, device=input_ids.device, ) prepared_stopping_criteria = self._get_stopping_criteria( generation_config=generation_config, stopping_criteria=stopping_criteria ) if generation_config.num_beams == 1: if generation_config.num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" " greedy search." 
) return self._sample( input_ids, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, streamer=None, **model_kwargs, ) elif generation_config.num_beams > 1: if generation_config.num_return_sequences > generation_config.num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=generation_config.num_beams, device=self.device, length_penalty=generation_config.length_penalty, do_early_stopping=generation_config.early_stopping, num_beam_hyps_to_keep=generation_config.num_return_sequences, max_length=generation_config.max_length, ) return self._beam_search( input_ids, beam_scorer, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, **model_kwargs, ) else: raise ValueError( f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}" ) def get_input_embeddings(self): return self.rag.generator.get_input_embeddings() def get_output_embeddings(self): return self.rag.generator.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.rag.generator.set_output_embeddings(new_embeddings) def shift_tokens_right(self, input_ids, start_token_id=None): """Shift input ids one token to the right, and pad with start_token_id""" if start_token_id is None: start_token_id = self.config.decoder_start_token_id shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = start_token_id return shifted_input_ids def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left target = torch.cat( [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 ) def _mask_pads(ll, smooth_obj): pad_mask = target.eq(self.config.generator.pad_token_id) if pad_mask.any(): ll.masked_fill_(pad_mask, 0.0) smooth_obj.masked_fill_(pad_mask, 0.0) return ll.squeeze(-1), smooth_obj.squeeze(-1) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) target = target.unsqueeze(-1) assert target.dim() == rag_logprobs.dim() ll = rag_logprobs.gather(dim=-1, index=target) smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) ll = ll.sum(1) # sum over tokens smooth_obj = smooth_obj.sum(1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss __all__ = ["RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration"]
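# Informal marginalization summary for the two generation heads above (shapes: batch b, docs d,
# target length t, vocabulary v; this is a reading aid, not additional functionality):
#   RAG-token    : p(y_i | x, y_<i) ~ sum_z p(z | x) * p(y_i | x, z, y_<i)
#                  RagTokenForGeneration.marginalize adds log_softmax(doc_scores) to the per-document
#                  token log-probs and takes logsumexp over the doc dimension, reducing (b, d, t, v)
#                  to (b, t, v).
#   RAG-sequence : p(y | x) ~ sum_z p(z | x) * p(y | x, z)
#                  RagSequenceForGeneration.get_nll adds the document log-probs once (to the second
#                  token's scores), sums token log-probs per document, then takes logsumexp over docs.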
transformers/src/transformers/models/rag/modeling_rag.py/0
{ "file_path": "transformers/src/transformers/models/rag/modeling_rag.py", "repo_id": "transformers", "token_count": 36410 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RegNet 10B checkpoints vissl.""" # You need to install a specific version of classy vision # pip install git+https://github.com/FrancescoSaverioZuppichini/ClassyVision.git@convert_weights import argparse import json import os import re from collections import OrderedDict from dataclasses import dataclass, field from functools import partial from pathlib import Path from pprint import pprint from typing import Dict, List, Tuple import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams from huggingface_hub import hf_hub_download from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.modeling_utils import PreTrainedModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger() @dataclass class Tracker: module: nn.Module traced: List[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) name2module: Dict[str, nn.Module] = field(default_factory=OrderedDict) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) if has_not_submodules: self.traced.append(m) self.name2module[name] = m def __call__(self, x: Tensor): for name, m in self.module.named_modules(): self.handles.append(m.register_forward_hook(partial(self._forward_hook, name=name))) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return {k: v for k, v in self.name2module.items() if len(list(v.state_dict().keys())) > 0} class FakeRegNetVisslWrapper(nn.Module): """ Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file. """ def __init__(self, model: nn.Module): super().__init__() feature_blocks: List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem)) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block"), f"Unexpected layer name {k}" block_index = len(feature_blocks) + 1 feature_blocks.append((f"res{block_index}", v)) self._feature_blocks = nn.ModuleDict(feature_blocks) def forward(self, x: Tensor): return get_trunk_forward_outputs( x, out_feat_keys=None, feature_blocks=self._feature_blocks, ) class FakeRegNetParams(RegNetParams): """ Used to instantiace a RegNet model from classy vision with the same depth as the 10B one but with super small parameters, so we can trace it in memory. 
""" def get_expanded_params(self): return [(8, 2, 2, 8, 1.0), (8, 2, 7, 8, 1.0), (8, 2, 17, 8, 1.0), (8, 2, 1, 8, 1.0)] def get_from_to_our_keys(model_name: str) -> Dict[str, str]: """ Returns a dictionary that maps from original model's key -> our implementation's keys """ # create our model (with small weights) our_config = RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[8, 8, 8, 8], groups_width=8) if "in1k" in model_name: our_model = RegNetForImageClassification(our_config) else: our_model = RegNetModel(our_config) # create from model (with small weights) from_model = FakeRegNetVisslWrapper( RegNet(FakeRegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) ) with torch.no_grad(): from_model = from_model.eval() our_model = our_model.eval() x = torch.randn((1, 3, 32, 32)) # trace both dest_tracker = Tracker(our_model) dest_traced = dest_tracker(x).parametrized pprint(dest_tracker.name2module) src_tracker = Tracker(from_model) src_traced = src_tracker(x).parametrized # convert the keys -> module dict to keys -> params def to_params_dict(dict_with_modules): params_dict = OrderedDict() for name, module in dict_with_modules.items(): for param_name, param in module.state_dict().items(): params_dict[f"{name}.{param_name}"] = param return params_dict from_to_ours_keys = {} src_state_dict = to_params_dict(src_traced) dst_state_dict = to_params_dict(dest_traced) for (src_key, src_param), (dest_key, dest_param) in zip(src_state_dict.items(), dst_state_dict.items()): from_to_ours_keys[src_key] = dest_key logger.info(f"{src_key} -> {dest_key}") # if "in1k" was in the model_name it means it must have a classification head (was finetuned) if "in1k" in model_name: from_to_ours_keys["0.clf.0.weight"] = "classifier.1.weight" from_to_ours_keys["0.clf.0.bias"] = "classifier.1.bias" return from_to_ours_keys def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 repo_id = "huggingface/label-files" num_labels = num_labels id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text()) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_config = { "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), # finetuned on imagenet "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), } # add seer weights logic def load_using_classy_vision(checkpoint_url: str) -> Tuple[Dict, Dict]: files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu") # check if we have a head, if yes add it model_state_dict = files["classy_state_dict"]["base_model"]["model"] return model_state_dict["trunk"], model_state_dict["heads"] names_to_from_model = { "regnet-y-10b-seer": partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", ), "regnet-y-10b-seer-in1k": partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", ), } from_to_ours_keys = get_from_to_our_keys(model_name) if not (save_directory / f"{model_name}.pth").exists(): 
logger.info("Loading original state_dict.") from_state_dict_trunk, from_state_dict_head = names_to_from_model[model_name]() from_state_dict = from_state_dict_trunk if "in1k" in model_name: # add the head from_state_dict = {**from_state_dict_trunk, **from_state_dict_head} logger.info("Done!") converted_state_dict = {} not_used_keys = list(from_state_dict.keys()) regex = r"\.block.-part." # this is "interesting", so the original checkpoints have `block[0,1]-part` in each key name, we remove it for key in from_state_dict.keys(): # remove the weird "block[0,1]-part" from the key src_key = re.sub(regex, "", key) # now src_key from the model checkpoints is the one we got from the original model after tracing, so use it to get the correct destination key dest_key = from_to_ours_keys[src_key] # store the parameter with our key converted_state_dict[dest_key] = from_state_dict[key] not_used_keys.remove(key) # check that all keys have been updated assert len(not_used_keys) == 0, f"Some keys where not used {','.join(not_used_keys)}" logger.info(f"The following keys were not used: {','.join(not_used_keys)}") # save our state dict to disk torch.save(converted_state_dict, save_directory / f"{model_name}.pth") del converted_state_dict else: logger.info("The state_dict was already stored on disk.") if push_to_hub: logger.info(f"Token is {os.environ['HF_TOKEN']}") logger.info("Loading our model.") # create our model our_config = names_to_config[model_name] our_model_func = RegNetModel if "in1k" in model_name: our_model_func = RegNetForImageClassification our_model = our_model_func(our_config) # place our model to the meta device (so remove all the weights) our_model.to(torch.device("meta")) logger.info("Loading state_dict in our model.") # load state dict state_dict_keys = our_model.state_dict().keys() PreTrainedModel._load_pretrained_model_low_mem( our_model, state_dict_keys, [save_directory / f"{model_name}.pth"] ) logger.info("Finally, pushing!") # push it to hub our_model.push_to_hub( repo_path_or_name=save_directory / model_name, commit_message="Add model", output_dir=save_directory / model_name, ) size = 384 # we can use the convnext one image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size) image_processor.push_to_hub( repo_path_or_name=save_directory / model_name, commit_message="Add image processor", output_dir=save_directory / model_name, ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported regnet* architecture," " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
transformers/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py", "repo_id": "transformers", "token_count": 4909 }
# coding=utf-8 # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ResNet model.""" import math from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ResNetConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/resnet-50" _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" class ResNetConvLayer(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu" ): super().__init__() self.convolution = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False ) self.normalization = nn.BatchNorm2d(out_channels) self.activation = ACT2FN[activation] if activation is not None else nn.Identity() def forward(self, input: Tensor) -> Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state class ResNetEmbeddings(nn.Module): """ ResNet Embeddings (stem) composed of a single aggressive convolution. """ def __init__(self, config: ResNetConfig): super().__init__() self.embedder = ResNetConvLayer( config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act ) self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.num_channels = config.num_channels def forward(self, pixel_values: Tensor) -> Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embedding = self.embedder(pixel_values) embedding = self.pooler(embedding) return embedding class ResNetShortCut(nn.Module): """ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. 
""" def __init__(self, in_channels: int, out_channels: int, stride: int = 2): super().__init__() self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False) self.normalization = nn.BatchNorm2d(out_channels) def forward(self, input: Tensor) -> Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) return hidden_state class ResNetBasicLayer(nn.Module): """ A classic ResNet's residual layer composed by two `3x3` convolutions. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 self.shortcut = ( ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), ) self.activation = ACT2FN[activation] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state class ResNetBottleNeckLayer(nn.Module): """ A classic ResNet's bottleneck layer composed by three `3x3` convolutions. The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3` convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`. If `downsample_in_bottleneck` is true, downsample will be in the first layer instead of the second layer. """ def __init__( self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4, downsample_in_bottleneck: bool = False, ): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 reduces_channels = out_channels // reduction self.shortcut = ( ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( ResNetConvLayer( in_channels, reduces_channels, kernel_size=1, stride=stride if downsample_in_bottleneck else 1 ), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride if not downsample_in_bottleneck else 1), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), ) self.activation = ACT2FN[activation] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state class ResNetStage(nn.Module): """ A ResNet stage composed by stacked layers. 
""" def __init__( self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ): super().__init__() layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer if config.layer_type == "bottleneck": first_layer = layer( in_channels, out_channels, stride=stride, activation=config.hidden_act, downsample_in_bottleneck=config.downsample_in_bottleneck, ) else: first_layer = layer(in_channels, out_channels, stride=stride, activation=config.hidden_act) self.layers = nn.Sequential( first_layer, *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)] ) def forward(self, input: Tensor) -> Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class ResNetEncoder(nn.Module): def __init__(self, config: ResNetConfig): super().__init__() self.stages = nn.ModuleList([]) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) ) in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]): self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth)) def forward( self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> BaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=hidden_state, hidden_states=hidden_states, ) class ResNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ResNetConfig base_model_prefix = "resnet" main_input_name = "pixel_values" _no_split_modules = ["ResNetConvLayer", "ResNetShortCut"] def _init_weights(self, module): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") # copied from the `reset_parameters` method of `class Linear(Module)` in `torch`. elif isinstance(module, nn.Linear): nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5)) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(module.bias, -bound, bound) elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) RESNET_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ RESNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, ) class ResNetModel(ResNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embedder = ResNetEmbeddings(config) self.encoder = ResNetEncoder(config) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( """ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, RESNET_START_DOCSTRING, ) class ResNetForImageClassification(ResNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.resnet = ResNetModel(config) # classification head self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) @add_start_docstrings( """ ResNet backbone, to be used with frameworks like DETR and MaskFormer. """, RESNET_START_DOCSTRING, ) class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embedding_size] + config.hidden_sizes self.embedder = ResNetEmbeddings(config) self.encoder = ResNetEncoder(config) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") >>> model = AutoBackbone.from_pretrained( ... "microsoft/resnet-50", out_features=["stage1", "stage2", "stage3", "stage4"] ... 
) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 2048, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) embedding_output = self.embedder(pixel_values) outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True) hidden_states = outputs.hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = ["ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone"]
transformers/src/transformers/models/resnet/modeling_resnet.py/0
{ "file_path": "transformers/src/transformers/models/resnet/modeling_resnet.py", "repo_id": "transformers", "token_count": 8244 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RT Detr checkpoints with Timm backbone""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import RTDetrConfig, RTDetrForObjectDetection, RTDetrImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_rt_detr_config(model_name: str) -> RTDetrConfig: config = RTDetrConfig() config.num_labels = 80 repo_id = "huggingface/label-files" filename = "coco-detection-mmdet-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if model_name == "rtdetr_r18vd": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [2, 2, 2, 2] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 3 elif model_name == "rtdetr_r34vd": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [3, 4, 6, 3] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 4 elif model_name == "rtdetr_r50vd_m": pass elif model_name == "rtdetr_r50vd": pass elif model_name == "rtdetr_r101vd": config.backbone_config.depths = [3, 4, 23, 3] config.encoder_ffn_dim = 2048 config.encoder_hidden_dim = 384 config.decoder_in_channels = [384, 384, 384] elif model_name == "rtdetr_r18vd_coco_o365": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [2, 2, 2, 2] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 3 elif model_name == "rtdetr_r50vd_coco_o365": pass elif model_name == "rtdetr_r101vd_coco_o365": config.backbone_config.depths = [3, 4, 23, 3] config.encoder_ffn_dim = 2048 config.encoder_hidden_dim = 384 config.decoder_in_channels = [384, 384, 384] return config def create_rename_keys(config): # here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] # stem # fmt: off last_key = ["weight", "bias", "running_mean", "running_var"] for level in range(3): rename_keys.append((f"backbone.conv1.conv1_{level+1}.conv.weight", f"model.backbone.model.embedder.embedder.{level}.convolution.weight")) for last in last_key: rename_keys.append((f"backbone.conv1.conv1_{level+1}.norm.{last}", f"model.backbone.model.embedder.embedder.{level}.normalization.{last}")) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): # shortcut if layer_idx == 0: if stage_idx == 0: 
rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.convolution.weight", ) ) for last in last_key: rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.normalization.{last}", ) ) else: rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.conv.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.1.convolution.weight", ) ) for last in last_key: rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.conv.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.1.normalization.{last}", ) ) rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2a.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.convolution.weight", ) ) for last in last_key: rename_keys.append(( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2a.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.{last}", )) rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2b.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.convolution.weight", ) ) for last in last_key: rename_keys.append(( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2b.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.{last}", )) # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/nn/backbone/presnet.py#L171 if config.backbone_config.layer_type != "basic": rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2c.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.2.convolution.weight", ) ) for last in last_key: rename_keys.append(( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2c.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.2.normalization.{last}", )) # fmt: on for i in range(config.encoder_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"encoder.encoder.{i}.layers.0.self_attn.out_proj.weight", f"model.encoder.encoder.{i}.layers.0.self_attn.out_proj.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.self_attn.out_proj.bias", f"model.encoder.encoder.{i}.layers.0.self_attn.out_proj.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear1.weight", f"model.encoder.encoder.{i}.layers.0.fc1.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear1.bias", f"model.encoder.encoder.{i}.layers.0.fc1.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear2.weight", f"model.encoder.encoder.{i}.layers.0.fc2.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear2.bias", f"model.encoder.encoder.{i}.layers.0.fc2.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm1.weight", f"model.encoder.encoder.{i}.layers.0.self_attn_layer_norm.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm1.bias", f"model.encoder.encoder.{i}.layers.0.self_attn_layer_norm.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm2.weight", f"model.encoder.encoder.{i}.layers.0.final_layer_norm.weight", ) ) 
rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm2.bias", f"model.encoder.encoder.{i}.layers.0.final_layer_norm.bias", ) ) for j in range(0, 3): rename_keys.append((f"encoder.input_proj.{j}.0.weight", f"model.encoder_input_proj.{j}.0.weight")) for last in last_key: rename_keys.append((f"encoder.input_proj.{j}.1.{last}", f"model.encoder_input_proj.{j}.1.{last}")) block_levels = 3 if config.backbone_config.layer_type != "basic" else 4 for i in range(len(config.encoder_in_channels) - 1): # encoder layers: hybridencoder parts for j in range(1, block_levels): rename_keys.append( (f"encoder.fpn_blocks.{i}.conv{j}.conv.weight", f"model.encoder.fpn_blocks.{i}.conv{j}.conv.weight") ) for last in last_key: rename_keys.append( ( f"encoder.fpn_blocks.{i}.conv{j}.norm.{last}", f"model.encoder.fpn_blocks.{i}.conv{j}.norm.{last}", ) ) rename_keys.append((f"encoder.lateral_convs.{i}.conv.weight", f"model.encoder.lateral_convs.{i}.conv.weight")) for last in last_key: rename_keys.append( (f"encoder.lateral_convs.{i}.norm.{last}", f"model.encoder.lateral_convs.{i}.norm.{last}") ) for j in range(3): for k in range(1, 3): rename_keys.append( ( f"encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", f"model.encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", ) ) for last in last_key: rename_keys.append( ( f"encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", f"model.encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", ) ) for j in range(1, block_levels): rename_keys.append( (f"encoder.pan_blocks.{i}.conv{j}.conv.weight", f"model.encoder.pan_blocks.{i}.conv{j}.conv.weight") ) for last in last_key: rename_keys.append( ( f"encoder.pan_blocks.{i}.conv{j}.norm.{last}", f"model.encoder.pan_blocks.{i}.conv{j}.norm.{last}", ) ) for j in range(3): for k in range(1, 3): rename_keys.append( ( f"encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", f"model.encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", ) ) for last in last_key: rename_keys.append( ( f"encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", f"model.encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", ) ) rename_keys.append( (f"encoder.downsample_convs.{i}.conv.weight", f"model.encoder.downsample_convs.{i}.conv.weight") ) for last in last_key: rename_keys.append( (f"encoder.downsample_convs.{i}.norm.{last}", f"model.encoder.downsample_convs.{i}.norm.{last}") ) for i in range(config.decoder_layers): # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"decoder.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.value_proj.weight", 
f"model.decoder.layers.{i}.encoder_attn.value_proj.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias", ) ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"decoder.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight")) rename_keys.append((f"decoder.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias")) rename_keys.append((f"decoder.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight")) rename_keys.append((f"decoder.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"decoder.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") ) for i in range(config.decoder_layers): # decoder + class and bounding box heads rename_keys.append( ( f"decoder.dec_score_head.{i}.weight", f"model.decoder.class_embed.{i}.weight", ) ) rename_keys.append( ( f"decoder.dec_score_head.{i}.bias", f"model.decoder.class_embed.{i}.bias", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.0.weight", f"model.decoder.bbox_embed.{i}.layers.0.weight", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.0.bias", f"model.decoder.bbox_embed.{i}.layers.0.bias", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.1.weight", f"model.decoder.bbox_embed.{i}.layers.1.weight", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.1.bias", f"model.decoder.bbox_embed.{i}.layers.1.bias", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.2.weight", f"model.decoder.bbox_embed.{i}.layers.2.weight", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.2.bias", f"model.decoder.bbox_embed.{i}.layers.2.bias", ) ) # decoder projection for i in range(len(config.decoder_in_channels)): rename_keys.append( ( f"decoder.input_proj.{i}.conv.weight", f"model.decoder_input_proj.{i}.0.weight", ) ) for last in last_key: rename_keys.append( ( f"decoder.input_proj.{i}.norm.{last}", f"model.decoder_input_proj.{i}.1.{last}", ) ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("decoder.denoising_class_embed.weight", "model.denoising_class_embed.weight"), ("decoder.query_pos_head.layers.0.weight", "model.decoder.query_pos_head.layers.0.weight"), ("decoder.query_pos_head.layers.0.bias", "model.decoder.query_pos_head.layers.0.bias"), ("decoder.query_pos_head.layers.1.weight", "model.decoder.query_pos_head.layers.1.weight"), ("decoder.query_pos_head.layers.1.bias", "model.decoder.query_pos_head.layers.1.bias"), ("decoder.enc_output.0.weight", 
"model.enc_output.0.weight"), ("decoder.enc_output.0.bias", "model.enc_output.0.bias"), ("decoder.enc_output.1.weight", "model.enc_output.1.weight"), ("decoder.enc_output.1.bias", "model.enc_output.1.bias"), ("decoder.enc_score_head.weight", "model.enc_score_head.weight"), ("decoder.enc_score_head.bias", "model.enc_score_head.bias"), ("decoder.enc_bbox_head.layers.0.weight", "model.enc_bbox_head.layers.0.weight"), ("decoder.enc_bbox_head.layers.0.bias", "model.enc_bbox_head.layers.0.bias"), ("decoder.enc_bbox_head.layers.1.weight", "model.enc_bbox_head.layers.1.weight"), ("decoder.enc_bbox_head.layers.1.bias", "model.enc_bbox_head.layers.1.bias"), ("decoder.enc_bbox_head.layers.2.weight", "model.enc_bbox_head.layers.2.weight"), ("decoder.enc_bbox_head.layers.2.bias", "model.enc_bbox_head.layers.2.bias"), ] ) return rename_keys def rename_key(state_dict, old, new): try: val = state_dict.pop(old) state_dict[new] = val except Exception: pass def read_in_q_k_v(state_dict, config): prefix = "" encoder_hidden_dim = config.encoder_hidden_dim # first: transformer encoder for i in range(config.encoder_layers): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.weight"] = in_proj_weight[ :encoder_hidden_dim, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.bias"] = in_proj_bias[:encoder_hidden_dim] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.weight"] = in_proj_weight[ encoder_hidden_dim : 2 * encoder_hidden_dim, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.bias"] = in_proj_bias[ encoder_hidden_dim : 2 * encoder_hidden_dim ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.weight"] = in_proj_weight[ -encoder_hidden_dim:, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.bias"] = in_proj_bias[-encoder_hidden_dim:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_rt_detr_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, repo_id): """ Copy/paste/tweak model's 
weights to our RTDETR structure. """ # load default config config = get_rt_detr_config(model_name) # load original model from torch hub model_name_to_checkpoint_url = { "rtdetr_r18vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r18vd_dec3_6x_coco_from_paddle.pth", "rtdetr_r34vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r34vd_dec4_6x_coco_from_paddle.pth", "rtdetr_r50vd_m": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r50vd_m_6x_coco_from_paddle.pth", "rtdetr_r50vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r50vd_6x_coco_from_paddle.pth", "rtdetr_r101vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r101vd_6x_coco_from_paddle.pth", "rtdetr_r18vd_coco_o365": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r18vd_5x_coco_objects365_from_paddle.pth", "rtdetr_r50vd_coco_o365": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r50vd_2x_coco_objects365_from_paddle.pth", "rtdetr_r101vd_coco_o365": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r101vd_2x_coco_objects365_from_paddle.pth", } logger.info(f"Converting model {model_name}...") state_dict = torch.hub.load_state_dict_from_url(model_name_to_checkpoint_url[model_name], map_location="cpu")[ "ema" ]["module"] # rename keys for src, dest in create_rename_keys(config): rename_key(state_dict, src, dest) # query, key and value matrices need special treatment read_in_q_k_v(state_dict, config) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them for key in state_dict.copy().keys(): if key.endswith("num_batches_tracked"): del state_dict[key] # for two_stage if "bbox_embed" in key or ("class_embed" in key and "denoising_" not in key): state_dict[key.split("model.decoder.")[-1]] = state_dict[key] # finally, create HuggingFace model and load state dict model = RTDetrForObjectDetection(config) model.load_state_dict(state_dict) model.eval() # load image processor image_processor = RTDetrImageProcessor() # prepare image img = prepare_img() # preprocess image transformations = transforms.Compose( [ transforms.Resize([640, 640], interpolation=transforms.InterpolationMode.BILINEAR), transforms.ToTensor(), ] ) original_pixel_values = transformations(img).unsqueeze(0) # insert batch dimension encoding = image_processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] assert torch.allclose(original_pixel_values, pixel_values) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) pixel_values = pixel_values.to(device) # Pass image by the model outputs = model(pixel_values) if model_name == "rtdetr_r18vd": expected_slice_logits = torch.tensor( [ [-4.3364253, -6.465683, -3.6130402], [-4.083815, -6.4039373, -6.97881], [-4.192215, -7.3410473, -6.9027247], ] ) expected_slice_boxes = torch.tensor( [ [0.16868353, 0.19833282, 0.21182671], [0.25559652, 0.55121744, 0.47988364], [0.7698693, 0.4124569, 0.46036878], ] ) elif model_name == "rtdetr_r34vd": expected_slice_logits = torch.tensor( [ [-4.3727384, -4.7921476, -5.7299604], [-4.840536, -8.455345, -4.1745796], [-4.1277084, -5.2154565, -5.7852697], ] ) expected_slice_boxes = torch.tensor( [ [0.258278, 0.5497808, 0.4732004], [0.16889669, 0.19890057, 0.21138911], [0.76632994, 0.4147879, 0.46851268], ] ) elif model_name == "rtdetr_r50vd_m": expected_slice_logits = torch.tensor( [ [-4.319764, -6.1349025, -6.094794], 
[-5.1056995, -7.744766, -4.803956], [-4.7685347, -7.9278393, -4.5751696], ] ) expected_slice_boxes = torch.tensor( [ [0.2582739, 0.55071366, 0.47660282], [0.16811174, 0.19954777, 0.21292639], [0.54986024, 0.2752091, 0.0561416], ] ) elif model_name == "rtdetr_r50vd": expected_slice_logits = torch.tensor( [ [-4.6476398, -5.001154, -4.9785104], [-4.1593494, -4.7038546, -5.946485], [-4.4374595, -4.658361, -6.2352347], ] ) expected_slice_boxes = torch.tensor( [ [0.16880608, 0.19992264, 0.21225442], [0.76837635, 0.4122631, 0.46368608], [0.2595386, 0.5483334, 0.4777486], ] ) elif model_name == "rtdetr_r101vd": expected_slice_logits = torch.tensor( [ [-4.6162, -4.9189, -4.6656], [-4.4701, -4.4997, -4.9659], [-5.6641, -7.9000, -5.0725], ] ) expected_slice_boxes = torch.tensor( [ [0.7707, 0.4124, 0.4585], [0.2589, 0.5492, 0.4735], [0.1688, 0.1993, 0.2108], ] ) elif model_name == "rtdetr_r18vd_coco_o365": expected_slice_logits = torch.tensor( [ [-4.8726, -5.9066, -5.2450], [-4.8157, -6.8764, -5.1656], [-4.7492, -5.7006, -5.1333], ] ) expected_slice_boxes = torch.tensor( [ [0.2552, 0.5501, 0.4773], [0.1685, 0.1986, 0.2104], [0.7692, 0.4141, 0.4620], ] ) elif model_name == "rtdetr_r50vd_coco_o365": expected_slice_logits = torch.tensor( [ [-4.6491, -3.9252, -5.3163], [-4.1386, -5.0348, -3.9016], [-4.4778, -4.5423, -5.7356], ] ) expected_slice_boxes = torch.tensor( [ [0.2583, 0.5492, 0.4747], [0.5501, 0.2754, 0.0574], [0.7693, 0.4137, 0.4613], ] ) elif model_name == "rtdetr_r101vd_coco_o365": expected_slice_logits = torch.tensor( [ [-4.5152, -5.6811, -5.7311], [-4.5358, -7.2422, -5.0941], [-4.6919, -5.5834, -6.0145], ] ) expected_slice_boxes = torch.tensor( [ [0.7703, 0.4140, 0.4583], [0.1686, 0.1991, 0.2107], [0.2570, 0.5496, 0.4750], ] ) else: raise ValueError(f"Unknown rt_detr_name: {model_name}") assert torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits.to(outputs.logits.device), atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes.to(outputs.pred_boxes.device), atol=1e-3) if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: # Upload model, image processor and config to the hub logger.info("Uploading PyTorch model and image processor to the hub...") config.push_to_hub( repo_id=repo_id, commit_message="Add config from convert_rt_detr_original_pytorch_checkpoint_to_pytorch.py" ) model.push_to_hub( repo_id=repo_id, commit_message="Add model from convert_rt_detr_original_pytorch_checkpoint_to_pytorch.py" ) image_processor.push_to_hub( repo_id=repo_id, commit_message="Add image processor from convert_rt_detr_original_pytorch_checkpoint_to_pytorch.py", ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="rtdetr_r50vd", type=str, help="model_name of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." 
) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") parser.add_argument( "--repo_id", type=str, help="repo_id where the model will be pushed to.", ) args = parser.parse_args() convert_rt_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.repo_id)
transformers/src/transformers/models/rt_detr/convert_rt_detr_original_pytorch_checkpoint_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/rt_detr/convert_rt_detr_original_pytorch_checkpoint_to_hf.py", "repo_id": "transformers", "token_count": 17773 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SAM model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class SamPromptEncoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SamPromptEncoder`]. The [`SamPromptEncoder`] module is used to encode the input 2D points and bounding boxes. Instantiating a configuration defaults will yield a similar configuration to that of the SAM-vit-h [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. image_size (`int`, *optional*, defaults to 1024): The expected output resolution of the image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. mask_input_channels (`int`, *optional*, defaults to 16): The number of channels to be fed to the `MaskDecoder` module. num_point_embeddings (`int`, *optional*, defaults to 4): The number of point embeddings to be used. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the encoder and pooler. """ base_config_key = "prompt_encoder_config" def __init__( self, hidden_size=256, image_size=1024, patch_size=16, mask_input_channels=16, num_point_embeddings=4, hidden_act="gelu", layer_norm_eps=1e-6, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.image_size = image_size self.patch_size = patch_size self.image_embedding_size = image_size // patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps class SamMaskDecoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SamMaskDecoder`]. It is used to instantiate a SAM mask decoder to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the SAM-vit-h [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function used inside the `SamMaskDecoder` module. mlp_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
num_hidden_layers (`int`, *optional*, defaults to 2): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. attention_downsample_rate (`int`, *optional*, defaults to 2): The downsampling rate of the attention layer. num_multimask_outputs (`int`, *optional*, defaults to 3): The number of outputs from the `SamMaskDecoder` module. In the Segment Anything paper, this is set to 3. iou_head_depth (`int`, *optional*, defaults to 3): The number of layers in the IoU head module. iou_head_hidden_dim (`int`, *optional*, defaults to 256): The dimensionality of the hidden states in the IoU head module. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. """ base_config_key = "mask_decoder_config" def __init__( self, hidden_size=256, hidden_act="relu", mlp_dim=2048, num_hidden_layers=2, num_attention_heads=8, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=256, layer_norm_eps=1e-6, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps class SamVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SamVisionModel`]. It is used to instantiate a SAM vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the SAM ViT-h [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. output_channels (`int`, *optional*, defaults to 256): Dimensionality of the output channels in the Patch Encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. image_size (`int`, *optional*, defaults to 1024): Expected resolution. Target size of the resized input image. patch_size (`int`, *optional*, defaults to 16): Size of the patches to be extracted from the input image. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to query, key, value projections. 
mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of mlp hidden dim to embedding dim. use_abs_pos (`bool`, *optional*, defaults to `True`): Whether to use absolute position embedding. use_rel_pos (`bool`, *optional*, defaults to `True`): Whether to use relative position embedding. window_size (`int`, *optional*, defaults to 14): Window size for relative position. global_attn_indexes (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`): The indexes of the global attention layers. num_pos_feats (`int`, *optional*, defaults to 128): The dimensionality of the position embedding. mlp_dim (`int`, *optional*): The dimensionality of the MLP layer in the Transformer encoder. If `None`, defaults to `mlp_ratio * hidden_size`. """ base_config_key = "vision_config" def __init__( self, hidden_size=768, output_channels=256, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=1024, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=128, mlp_dim=None, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.output_channels = output_channels self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = int(hidden_size * mlp_ratio) if mlp_dim is None else mlp_dim class SamConfig(PretrainedConfig): r""" [`SamConfig`] is the configuration class to store the configuration of a [`SamModel`]. It is used to instantiate a SAM model according to the specified arguments, defining the vision model, prompt-encoder model and mask decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the SAM-ViT-H [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (Union[`dict`, `SamVisionConfig`], *optional*): Dictionary of configuration options used to initialize [`SamVisionConfig`]. prompt_encoder_config (Union[`dict`, `SamPromptEncoderConfig`], *optional*): Dictionary of configuration options used to initialize [`SamPromptEncoderConfig`]. mask_decoder_config (Union[`dict`, `SamMaskDecoderConfig`], *optional*): Dictionary of configuration options used to initialize [`SamMaskDecoderConfig`]. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ( ... SamVisionConfig, ... SamPromptEncoderConfig, ... SamMaskDecoderConfig, ... SamModel, ... 
) >>> # Initializing a SamConfig with `"facebook/sam-vit-huge"` style configuration >>> configuration = SamConfig() >>> # Initializing a SamModel (with random weights) from the `"facebook/sam-vit-huge"` style configuration >>> model = SamModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a SamConfig from a SamVisionConfig, SamPromptEncoderConfig, and SamMaskDecoderConfig >>> # Initializing SAM vision, SAM Q-Former and language model configurations >>> vision_config = SamVisionConfig() >>> prompt_encoder_config = SamPromptEncoderConfig() >>> mask_decoder_config = SamMaskDecoderConfig() >>> config = SamConfig(vision_config, prompt_encoder_config, mask_decoder_config) ```""" model_type = "sam" sub_configs = { "prompt_encoder_config": SamPromptEncoderConfig, "mask_decoder_config": SamMaskDecoderConfig, "vision_config": SamVisionConfig, } def __init__( self, vision_config=None, prompt_encoder_config=None, mask_decoder_config=None, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) vision_config = vision_config if vision_config is not None else {} prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {} mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {} if isinstance(vision_config, SamVisionConfig): vision_config = vision_config.to_dict() if isinstance(prompt_encoder_config, SamPromptEncoderConfig): prompt_encoder_config = prompt_encoder_config.to_dict() if isinstance(mask_decoder_config, SamMaskDecoderConfig): mask_decoder_config = mask_decoder_config.to_dict() self.vision_config = SamVisionConfig(**vision_config) self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config) self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config) self.initializer_range = initializer_range __all__ = ["SamConfig", "SamMaskDecoderConfig", "SamPromptEncoderConfig", "SamVisionConfig"]
transformers/src/transformers/models/sam/configuration_sam.py/0
{ "file_path": "transformers/src/transformers/models/sam/configuration_sam.py", "repo_id": "transformers", "token_count": 5421 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converting Meta SeamlessM4Tv2 checkpoints from seamless_communication to HF.""" import argparse import os from pathlib import Path import torch from accelerate.utils.modeling import find_tied_parameters from seamless_communication.inference import Translator from transformers import ( SeamlessM4TFeatureExtractor, SeamlessM4TProcessor, SeamlessM4TTokenizer, SeamlessM4Tv2Config, SeamlessM4Tv2Model, ) from transformers.utils import logging # fmt: off UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ] # fmt: on # fmt: off VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",] # fmt: on # fmt: off LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",] # fmt: on def assert_param_count(model_1, model_2): count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0]) count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0]) assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}" def param_count(model): return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0]) def _grab_best_device(use_gpu=True): if torch.cuda.device_count() > 0 and use_gpu: device = "cuda" else: device = "cpu" return torch.device(device) logging.set_verbosity_info() logger = logging.get_logger(__name__) vocoder_convert_list = [ ("ups", "hifi_gan.upsampler"), ("conv_pre", "hifi_gan.conv_pre"), ("resblocks", "hifi_gan.resblocks"), ("conv_post", "hifi_gan.conv_post"), ("lang", "language_embedding"), ("spkr", "speaker_embedding"), ("dict.", "unit_embedding."), 
("dur_predictor.conv1.0", "dur_predictor.conv1"), ("dur_predictor.conv2.0", "dur_predictor.conv2"), ] # order is important wav2vec_convert_list = [ ("speech_encoder_frontend.model_dim_proj", "feature_projection.projection"), ("speech_encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"), ("speech_encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"), ("speech_encoder.inner.layers", "encoder.layers"), ("speech_encoder.inner_layer_norm", "encoder.layer_norm"), ("speech_encoder.adaptor_layers", "adapter.layers"), ("inner_proj", "intermediate_dense"), ("self_attn.output_proj", "self_attn.linear_out"), ("output_proj", "output_dense"), ("self_attn.k_proj", "self_attn.linear_k"), ("self_attn.v_proj", "self_attn.linear_v"), ("self_attn.q_proj", "self_attn.linear_q"), ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"), ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"), ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"), ("self_attn.sdpa.r_proj", "self_attn.linear_pos"), ("conv.pointwise_conv1", "conv_module.pointwise_conv1"), ("conv.pointwise_conv2", "conv_module.pointwise_conv2"), ("conv.depthwise_conv", "conv_module.depthwise_conv"), ("conv.batch_norm", "conv_module.batch_norm"), ("conv.layer_norm", "conv_module.depthwise_layer_norm"), ("conv_layer_norm", "conv_module.layer_norm"), ("speech_encoder.proj1", "intermediate_ffn.intermediate_dense"), ("speech_encoder.proj2", "intermediate_ffn.output_dense"), ("speech_encoder.layer_norm", "inner_layer_norm"), ] t2u_convert_list = [ ("t2u_model.final_proj", "lm_head"), ("t2u_model.", "model."), ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"), ("encoder_decoder_attn", "cross_attention"), ("linear_k", "k_proj"), ("linear_v", "v_proj"), ("linear_q", "q_proj"), ("ffn.inner_proj", "ffn.fc1"), ("ffn.output_proj", "ffn.fc2"), ("output_proj", "out_proj"), ("decoder_frontend.embed_char", "decoder.embed_char"), ("decoder_frontend.pos_emb_alpha_char", "decoder.pos_emb_alpha_char"), ("decoder_frontend.embed", "decoder.embed_tokens"), ("decoder_frontend.pos_emb_alpha", "decoder.pos_emb_alpha"), ("conv1d.conv", "conv"), ("conv1d_layer_norm", "conv_layer_norm"), ("decoder_frontend.variance_adaptor", "decoder"), ("duration_predictor.conv1.0", "duration_predictor.conv1"), ("duration_predictor.conv2.0", "duration_predictor.conv2"), ] text_convert_list = [ ("text_encoder.", ""), ("text_decoder.", ""), ("text_encoder_frontend.embed", "embed_tokens"), ("text_decoder_frontend.embed", "embed_tokens"), ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"), ("encoder_decoder_attn", "cross_attention"), ("linear_k", "k_proj"), ("linear_v", "v_proj"), ("linear_q", "q_proj"), ("ffn.inner_proj", "ffn.fc1"), ("ffn.output_proj", "ffn.fc2"), ("output_proj", "out_proj"), ("final_proj", "lm_head"), ] CUR_PATH = os.path.dirname(os.path.abspath(__file__)) default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache") CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "huggingface", "hub") def _load_hf_config(): return SeamlessM4Tv2Config() def _convert_model( original_model, hf_model, convert_list, device, unwanted_prefix="model.", filter_state_dict="speech", exclude_state_dict=None, ): state_dict = original_model.state_dict() # filter func if isinstance(filter_state_dict, str): def filter_func(x): return filter_state_dict in x[0] else: def filter_func(item): if exclude_state_dict is not None and exclude_state_dict in item[0]: return False for filter_el in filter_state_dict: if filter_el 
in item[0]: return True return False state_dict = dict(filter(filter_func, state_dict.items())) for k, v in list(state_dict.items()): new_k = k[len(unwanted_prefix) :] for old_layer_name, new_layer_name in convert_list: if old_layer_name in new_k: new_k = new_k.replace(old_layer_name, new_layer_name) # must do it by hand if ".layer_norm" in new_k and new_k.split(".layer_norm")[0][-1].isnumeric(): new_k = new_k.replace("layer_norm", "final_layer_norm") state_dict[new_k] = state_dict.pop(k) extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys()) extra_keys = set(extra_keys) missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys()) missing_keys = set({k for k in missing_keys if "final_logits_bias" not in k}) if len(extra_keys) != 0: raise ValueError(f"extra keys found: {extra_keys}") if len(missing_keys) != 0: raise ValueError(f"missing keys: {missing_keys}") hf_model.load_state_dict(state_dict, strict=False) n_params = param_count(hf_model) logger.info(f"model loaded: {round(n_params/1e6,1)}M params") hf_model.eval() hf_model.to(device) del state_dict return hf_model def load_model(save_dir, model_type, repo_id): """ Meta SeamlessM4Tv2 is made of 8 main components: - speech_encoder (#1) and speech_encoder_frontend (#2) - t2u_model (#3) - text_encoder (#4) and text_encoder_frontend (#5) - text_decoder (#6) [and text_decoder_frontend (#5) = equals to text_encoder_frontend] - final_proj (#7) - vocoder (#8) """ device = _grab_best_device() name = "seamlessM4T_v2_large" original_model = Translator(name, "vocoder_v2", device, dtype=torch.float32) ######### TOKENIZER langs = LARGE_SUPPORTED_LANGUAGES langs = [f"__{lang}__" for lang in langs] vocab_file = os.path.join(os.path.expanduser("~"), "tokenizer", model_type, "tokenizer.model") save_dir = os.path.join(save_dir, name) Path(save_dir).mkdir(exist_ok=True) tokenizer = SeamlessM4TTokenizer(vocab_file, additional_special_tokens=langs) sanity_check_lang_id = tokenizer.convert_tokens_to_ids("__fra__") tokenizer.save_pretrained(save_dir) tokenizer = SeamlessM4TTokenizer.from_pretrained(save_dir) if sanity_check_lang_id != tokenizer.convert_tokens_to_ids("__fra__"): raise ValueError( f"Error in tokenizer saving/loading - __fra__ lang id is not coherent: {sanity_check_lang_id} vs {tokenizer.convert_tokens_to_ids('__fra__')}" ) ####### get language to ids dict text_decoder_lang_code_to_id = {lang.replace("__", ""): tokenizer.convert_tokens_to_ids(lang) for lang in langs} # offset: vocoder unit vocab size + 5 (for EOS/PAD/BOS/UNK/MSK) + len(supported_languages) t2u_lang_code_to_id = { code.replace("__", ""): i + 10005 + len(UNIT_SUPPORTED_LANGUAGES) for i, code in enumerate(UNIT_SUPPORTED_LANGUAGES) } vocoder_lang_code_to_id = {code.replace("__", ""): i for i, code in enumerate(VOCODER_SUPPORTED_LANGUAGES)} ######### FE fe = SeamlessM4TFeatureExtractor(language_code=langs) fe.save_pretrained(save_dir) fe = SeamlessM4TFeatureExtractor.from_pretrained(save_dir) processor = SeamlessM4TProcessor(feature_extractor=fe, tokenizer=tokenizer) processor.save_pretrained(save_dir) processor.push_to_hub(repo_id=repo_id, create_pr=True) processor = SeamlessM4TProcessor.from_pretrained(save_dir) ######## Model # init config hf_config = _load_hf_config() ######## get id_to_text and char_to_id from original model tokenizers id_to_text = {i: original_model.text_tokenizer.model.index_to_token(i) for i in range(hf_config.vocab_size)} char_to_id = { original_model.model.t2u_model.decoder_frontend.char_tokenizer.model.index_to_token(i): i for i 
in range(10904) } # init model hf_model = SeamlessM4Tv2Model(hf_config) hf_model.generation_config.__setattr__("text_decoder_lang_to_code_id", text_decoder_lang_code_to_id) hf_model.generation_config.__setattr__("t2u_lang_code_to_id", t2u_lang_code_to_id) hf_model.generation_config.__setattr__("vocoder_lang_code_to_id", vocoder_lang_code_to_id) hf_model.generation_config.__setattr__("id_to_text", id_to_text) hf_model.generation_config.__setattr__("char_to_id", char_to_id) # -1. take care of vocoder # similarly to speech T5 must apply and remove weight norm hf_model.vocoder.apply_weight_norm() hf_model.vocoder = _convert_model( original_model, hf_model.vocoder, vocoder_convert_list, device, unwanted_prefix="vocoder.code_generator.", filter_state_dict="vocoder", ) hf_model.vocoder.remove_weight_norm() # 1. take care of speech encoder wav2vec = hf_model.speech_encoder hf_model.speech_encoder = _convert_model( original_model, wav2vec, wav2vec_convert_list, device, unwanted_prefix="model.", filter_state_dict="speech" ) # 2. take care of t2u hf_model.t2u_model = _convert_model( original_model, hf_model.t2u_model, t2u_convert_list, device, unwanted_prefix="model.", filter_state_dict="t2u_model", ) # 3. take care of text encoder hf_model.text_encoder = _convert_model( original_model, hf_model.text_encoder, text_convert_list, device, unwanted_prefix="model.", filter_state_dict=["model.text_encoder"], exclude_state_dict="t2u_model", ) # 4. take care of text decoder hf_model.text_decoder = _convert_model( original_model, hf_model.text_decoder, text_convert_list, device, unwanted_prefix="model.", filter_state_dict=["model.text_decoder"], exclude_state_dict="t2u_model", ) # 5. take care of final proj hf_model.lm_head = _convert_model( original_model, hf_model.lm_head, [("final_proj.", "")], device, unwanted_prefix="model.", filter_state_dict=["model.final_proj"], exclude_state_dict="t2u_model", ) # sanity check print(find_tied_parameters(hf_model)) count_1 = param_count(hf_model) count_2 = param_count(original_model) print(f"HF MODEL:{count_1}, ORIGINAL_MODEL: {count_2}, diff:{count_1 - count_2}") print(f"HF MODEL excluding embeddings:{hf_model.num_parameters(exclude_embeddings=True)}") del original_model hf_model.generation_config._from_model_config = False hf_model.save_pretrained(save_dir) hf_model.push_to_hub(repo_id=repo_id, create_pr=True) hf_model = SeamlessM4Tv2Model.from_pretrained(save_dir) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default="large", type=str, help="Model type.", ) parser.add_argument( "--save_dir", default="/home/ubuntu/weights_v2", type=str, help="Path to the output PyTorch model.", ) parser.add_argument( "--repo_id", default="facebook/seamless-m4t-v2-large", type=str, help="Repo ID.", ) args = parser.parse_args() load_model(args.save_dir, args.model_type, args.repo_id)
transformers/src/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py", "repo_id": "transformers", "token_count": 6572 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SEW checkpoint.""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model, is_finetuned): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True else: for key, mapped_key in MAPPING.items(): mapped_key = "sew." 
+ mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "weight" in name: weight_type = "weight" elif "bias" in name: weight_type = "bias" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def convert_config(model, is_finetuned): config = SEWConfig() if is_finetuned: fs_config = model.w2v_encoder.w2v_model.cfg else: fs_config = model.cfg config.conv_bias = fs_config.conv_bias conv_layers = eval(fs_config.conv_feature_layers) config.conv_dim = [x[0] for x in conv_layers] config.conv_kernel = [x[1] for x in conv_layers] config.conv_stride = [x[2] for x in conv_layers] config.feat_extract_activation = "gelu" config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group" config.final_dropout = 0.0 config.hidden_act = fs_config.activation_fn.name config.hidden_size = fs_config.encoder_embed_dim config.initializer_range = 0.02 config.intermediate_size = fs_config.encoder_ffn_embed_dim config.layer_norm_eps = 1e-5 config.layerdrop = fs_config.encoder_layerdrop config.num_attention_heads = fs_config.encoder_attention_heads config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups config.num_conv_pos_embeddings = fs_config.conv_pos config.num_feat_extract_layers = len(conv_layers) config.num_hidden_layers = fs_config.encoder_layers config.squeeze_factor = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: fs_config = model.cfg config.final_dropout = fs_config.final_dropout config.layerdrop = fs_config.layerdrop config.activation_dropout = fs_config.activation_dropout config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 config.attention_dropout = fs_config.attention_dropout config.feat_proj_dropout = fs_config.dropout_input config.hidden_dropout = fs_config.dropout config.mask_feature_length = fs_config.mask_channel_length config.mask_feature_prob = fs_config.mask_channel_prob config.mask_time_length = fs_config.mask_length config.mask_time_prob = fs_config.mask_prob config.feature_extractor_type = "Wav2Vec2FeatureExtractor" config.tokenizer_class = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def convert_sew_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ): """ Copy/paste/tweak model's weights to transformers design. 
""" if is_finetuned: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} ) else: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) if config_path is not None: config = SEWConfig.from_pretrained(config_path) else: config = convert_config(model[0], is_finetuned) model = model[0].eval() return_attention_mask = True if config.feat_extract_norm == "layer" else False feature_extractor = Wav2Vec2FeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, ) if is_finetuned: if dict_path: target_dict = Dictionary.load(dict_path) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq target_dict.indices[target_dict.bos_word] = target_dict.pad_index target_dict.indices[target_dict.pad_word] = target_dict.bos_index config.bos_token_id = target_dict.pad_index config.pad_token_id = target_dict.bos_index config.eos_token_id = target_dict.eos_index config.vocab_size = len(target_dict.symbols) vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json") if not os.path.isdir(pytorch_dump_folder_path): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path)) return os.makedirs(pytorch_dump_folder_path, exist_ok=True) with open(vocab_path, "w", encoding="utf-8") as vocab_handle: json.dump(target_dict.indices, vocab_handle) tokenizer = Wav2Vec2CTCTokenizer( vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, ) processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) processor.save_pretrained(pytorch_dump_folder_path) hf_model = SEWForCTC(config) else: hf_model = SEWModel(config) feature_extractor.save_pretrained(pytorch_dump_folder_path) recursively_load_weights(model, hf_model, is_finetuned) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) args = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
transformers/src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 5650 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2 checkpoint.""" import argparse import fairseq import torch from torch import nn from transformers import ( MBart50Tokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2Model, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } TOP_LEVEL_KEYS = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights_wav2vec2(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.feature_extractor adapter = hf_model.adapter for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]): load_adapter(name, value, adapter, unused_weights) is_used = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name: weight_type = "bias" elif "weight" in name: weight_type = "weight" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def load_adapter(full_name, value, adapter, unused_weights): name = full_name.split("adaptor.")[-1] items = name.split(".") if items[1].isdigit(): layer_id = int(items[1]) else: layer_id = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." adapter.proj_layer_norm.bias.data = value logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.") if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." adapter.proj_layer_norm.weight.data = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." adapter.proj.bias.data = value logger.info(f"Adapter proj layer bias was initialized from {full_name}.") if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." adapter.proj.weight.data = value logger.info(f"Adapter proj layer weight was initialized from {full_name}.") elif isinstance(layer_id, int): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." adapter.layers[layer_id].conv.bias.data = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.") elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." adapter.layers[layer_id].conv.weight.data = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.") else: unused_weights.append(full_name) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer @torch.no_grad() def convert_wav2vec2_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ): """ Copy/paste/tweak model's weights to transformers design. 
""" # load configs encoder_config = Wav2Vec2Config.from_pretrained( encoder_config_path, add_adapter=True, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, token_token=True, output_hidden_size=encoder_output_dim, ) decoder_config = MBartConfig.from_pretrained(decoder_config_path) # load model model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, }, ) model = model[0].eval() # load feature extractor feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, token_token=True) # set weights for wav2vec2 encoder hf_encoder = Wav2Vec2Model(encoder_config) recursively_load_weights_wav2vec2(model.encoder, hf_encoder) # load decoder weights hf_decoder = MBartForCausalLM(decoder_config) missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False) logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}") logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}") hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder) hf_wav2vec.config.tie_word_embeddings = False tokenizer = MBart50Tokenizer(dict_path) tokenizer.save_pretrained(pytorch_dump_folder_path) config = hf_wav2vec.config.to_dict() config["pad_token_id"] = tokenizer.pad_token_id config["bos_token_id"] = tokenizer.bos_token_id config["eos_token_id"] = tokenizer.eos_token_id config["tokenizer_class"] = "mbart50" config["feature_extractor_type"] = "wav2vec2" config["decoder_start_token_id"] = tokenizer.eos_token_id config["forced_bos_token_id"] = 250004 config["forced_eos_token_id"] = tokenizer.eos_token_id hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) feature_extractor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config") args = parser.parse_args() convert_wav2vec2_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, 
encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
transformers/src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py", "repo_id": "transformers", "token_count": 6577 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for SpeechT5.""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging logger = logging.get_logger(__name__) class SpeechT5FeatureExtractor(SequenceFeatureExtractor): r""" Constructs a SpeechT5 feature extractor. This class can pre-process a raw speech signal by (optionally) normalizing to zero-mean unit-variance, for use by the SpeechT5 speech encoder prenet. This class can also extract log-mel filter bank features from raw speech, for use by the SpeechT5 speech decoder prenet. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values. do_normalize (`bool`, *optional*, defaults to `False`): Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly improve the performance for some models. num_mel_bins (`int`, *optional*, defaults to 80): The number of mel-frequency bins in the extracted spectrogram features. hop_length (`int`, *optional*, defaults to 16): Number of ms between windows. Otherwise referred to as "shift" in many papers. win_length (`int`, *optional*, defaults to 64): Number of ms per window. win_function (`str`, *optional*, defaults to `"hann_window"`): Name for the window function used for windowing, must be accessible via `torch.{win_function}` frame_signal_scale (`float`, *optional*, defaults to 1.0): Constant multiplied in creating the frames before applying DFT. This argument is deprecated. fmin (`float`, *optional*, defaults to 80): Minimum mel frequency in Hz. fmax (`float`, *optional*, defaults to 7600): Maximum mel frequency in Hz. mel_floor (`float`, *optional*, defaults to 1e-10): Minimum value of mel frequency banks. reduction_factor (`int`, *optional*, defaults to 2): Spectrogram length reduction factor. This argument is deprecated. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether or not [`~SpeechT5FeatureExtractor.__call__`] should return `attention_mask`. 
""" model_input_names = ["input_values", "attention_mask"] def __init__( self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0, do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80, fmax: float = 7600, mel_floor: float = 1e-10, reduction_factor: int = 2, return_attention_mask: bool = True, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.do_normalize = do_normalize self.return_attention_mask = return_attention_mask self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.frame_signal_scale = frame_signal_scale self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.reduction_factor = reduction_factor self.sample_size = win_length * sampling_rate // 1000 self.sample_stride = hop_length * sampling_rate // 1000 self.n_fft = optimal_fft_length(self.sample_size) self.n_freqs = (self.n_fft // 2) + 1 self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True) self.mel_filters = mel_filter_bank( num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", ) if frame_signal_scale != 1.0: warnings.warn( "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, ) if reduction_factor != 2.0: warnings.warn( "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def zero_mean_unit_var_norm( input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0 ) -> List[np.ndarray]: """ Every array in the list is normalized to have zero mean and unit variance """ if attention_mask is not None: attention_mask = np.array(attention_mask, np.int32) normed_input_values = [] for vector, length in zip(input_values, attention_mask.sum(-1)): normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) if length < normed_slice.shape[0]: normed_slice[length:] = padding_value normed_input_values.append(normed_slice) else: normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] return normed_input_values def _extract_mel_features( self, one_waveform: np.ndarray, ) -> np.ndarray: """ Extracts log-mel filterbank features for one waveform array (unbatched). 
""" log_mel_spec = spectrogram( one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", ) return log_mel_spec.T def __call__( self, audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Pass in a value for `audio` to extract waveform features. Pass in a value for `audio_target` to extract log-mel spectrogram features. Args: audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`, *optional*): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. This outputs waveform features. Must be mono channel audio, not stereo, i.e. single float per timestep. audio_target (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`, *optional*): The sequence or batch of sequences to be processed as targets. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. This outputs log-mel spectrogram features. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` or `audio_target` input was sampled. 
It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if audio is None and audio_target is None: raise ValueError("You must provide either `audio` or `audio_target` values.") if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if audio is not None: inputs = self._process_audio( audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, ) else: inputs = None if audio_target is not None: inputs_target = self._process_audio( audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, ) if inputs is None: return inputs_target else: inputs["labels"] = inputs_target["input_values"] decoder_attention_mask = inputs_target.get("attention_mask") if decoder_attention_mask is not None: inputs["decoder_attention_mask"] = decoder_attention_mask return inputs def _process_audio( self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1 if is_batched_numpy and len(speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list))) ) if is_batched: speech = [np.asarray(speech, dtype=np.float32) for speech in speech] elif not is_batched and not isinstance(speech, np.ndarray): speech = np.asarray(speech, dtype=np.float32) elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64): speech = speech.astype(np.float32) # always return batch if not is_batched: speech = [speech] # needed to make pad() work on spectrogram inputs feature_size_hack = self.feature_size # convert into correct format for padding if is_target: features = [self._extract_mel_features(waveform) for waveform in speech] encoded_inputs = BatchFeature({"input_values": features}) self.feature_size = self.num_mel_bins else: encoded_inputs = BatchFeature({"input_values": speech}) padded_inputs = self.pad( encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, ) self.feature_size = feature_size_hack # convert input values to correct format input_values = padded_inputs["input_values"] if not isinstance(input_values[0], np.ndarray): padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values] elif ( not isinstance(input_values, np.ndarray) and isinstance(input_values[0], np.ndarray) and input_values[0].dtype is np.dtype(np.float64) ): padded_inputs["input_values"] = [array.astype(np.float32) for array in 
input_values] elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64): padded_inputs["input_values"] = input_values.astype(np.float32) # convert attention_mask to correct format attention_mask = padded_inputs.get("attention_mask") if attention_mask is not None: padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: attention_mask = ( attention_mask if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD else None ) padded_inputs["input_values"] = self.zero_mean_unit_var_norm( padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value ) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs def to_dict(self) -> Dict[str, Any]: output = super().to_dict() # Don't serialize these as they are derived from the other properties. names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"] for name in names: if name in output: del output[name] return output __all__ = ["SpeechT5FeatureExtractor"]
transformers/src/transformers/models/speecht5/feature_extraction_speecht5.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/feature_extraction_speecht5.py", "repo_id": "transformers", "token_count": 7620 }
# coding=utf-8 # Copyright 2024 the Fast authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TextNet model configuration""" from transformers import PretrainedConfig from transformers.utils import logging from transformers.utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class TextNetConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TextNextModel`]. It is used to instantiate a TextNext model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [czczup/textnet-base](https://huggingface.co/czczup/textnet-base). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.Read the documentation from [`PretrainedConfig`] for more information. Args: stem_kernel_size (`int`, *optional*, defaults to 3): The kernel size for the initial convolution layer. stem_stride (`int`, *optional*, defaults to 2): The stride for the initial convolution layer. stem_num_channels (`int`, *optional*, defaults to 3): The num of channels in input for the initial convolution layer. stem_out_channels (`int`, *optional*, defaults to 64): The num of channels in out for the initial convolution layer. stem_act_func (`str`, *optional*, defaults to `"relu"`): The activation function for the initial convolution layer. image_size (`Tuple[int, int]`, *optional*, defaults to `[640, 640]`): The size (resolution) of each image. conv_layer_kernel_sizes (`List[List[List[int]]]`, *optional*): A list of stage-wise kernel sizes. If `None`, defaults to: `[[[3, 3], [3, 3], [3, 3]], [[3, 3], [1, 3], [3, 3], [3, 1]], [[3, 3], [3, 3], [3, 1], [1, 3]], [[3, 3], [3, 1], [1, 3], [3, 3]]]`. conv_layer_strides (`List[List[int]]`, *optional*): A list of stage-wise strides. If `None`, defaults to: `[[1, 2, 1], [2, 1, 1, 1], [2, 1, 1, 1], [2, 1, 1, 1]]`. hidden_sizes (`List[int]`, *optional*, defaults to `[64, 64, 128, 256, 512]`): Dimensionality (hidden size) at each stage. batch_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the batch normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. 
If unset and `out_features` is unset, will default to the last stage. Examples: ```python >>> from transformers import TextNetConfig, TextNetBackbone >>> # Initializing a TextNetConfig >>> configuration = TextNetConfig() >>> # Initializing a model (with random weights) >>> model = TextNetBackbone(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "textnet" def __init__( self, stem_kernel_size=3, stem_stride=2, stem_num_channels=3, stem_out_channels=64, stem_act_func="relu", image_size=[640, 640], conv_layer_kernel_sizes=None, conv_layer_strides=None, hidden_sizes=[64, 64, 128, 256, 512], batch_norm_eps=1e-5, initializer_range=0.02, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) if conv_layer_kernel_sizes is None: conv_layer_kernel_sizes = [ [[3, 3], [3, 3], [3, 3]], [[3, 3], [1, 3], [3, 3], [3, 1]], [[3, 3], [3, 3], [3, 1], [1, 3]], [[3, 3], [3, 1], [1, 3], [3, 3]], ] if conv_layer_strides is None: conv_layer_strides = [[1, 2, 1], [2, 1, 1, 1], [2, 1, 1, 1], [2, 1, 1, 1]] self.stem_kernel_size = stem_kernel_size self.stem_stride = stem_stride self.stem_num_channels = stem_num_channels self.stem_out_channels = stem_out_channels self.stem_act_func = stem_act_func self.image_size = image_size self.conv_layer_kernel_sizes = conv_layer_kernel_sizes self.conv_layer_strides = conv_layer_strides self.initializer_range = initializer_range self.hidden_sizes = hidden_sizes self.batch_norm_eps = batch_norm_eps self.depths = [len(layer) for layer in self.conv_layer_kernel_sizes] self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, 5)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) __all__ = ["TextNetConfig"]
transformers/src/transformers/models/textnet/configuration_textnet.py/0
{ "file_path": "transformers/src/transformers/models/textnet/configuration_textnet.py", "repo_id": "transformers", "token_count": 2473 }
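The configuration above is most often consumed through the backbone API. The following is a minimal sketch under stated assumptions: the chosen `out_features` values are illustrative rather than anything prescribed by the file, and the backbone is randomly initialised (no pretrained weights are loaded).

```python
from transformers import TextNetConfig, TextNetBackbone

# Expose intermediate feature maps; valid names come from `stage_names`
# ("stem", "stage1", ..., "stage4") built in TextNetConfig.__init__ above.
config = TextNetConfig(out_features=["stage2", "stage3", "stage4"])

# Randomly initialised backbone built from the config.
model = TextNetBackbone(config)

# `get_aligned_output_features_output_indices` keeps the two views in sync.
print(config.out_features)  # e.g. ['stage2', 'stage3', 'stage4']
print(config.out_indices)   # e.g. [2, 3, 4]
```

Requesting features by name or by index is equivalent; the mixin resolves whichever of the two is left unset.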
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Any, Dict, Optional, Tuple, Union import torch from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import to_pil_image from ...image_utils import ImageInput, make_list_of_images from ...utils import TensorType, logging, requires_backends from ...utils.import_utils import is_timm_available, is_torch_available if is_timm_available(): import timm if is_torch_available(): import torch logger = logging.get_logger(__name__) class TimmWrapperImageProcessor(BaseImageProcessor): """ Wrapper class for timm models to be used within transformers. Args: pretrained_cfg (`Dict[str, Any]`): The configuration of the pretrained model used to resolve evaluation and training transforms. architecture (`Optional[str]`, *optional*): Name of the architecture of the model. """ main_input_name = "pixel_values" def __init__( self, pretrained_cfg: Dict[str, Any], architecture: Optional[str] = None, **kwargs, ): requires_backends(self, "timm") super().__init__(architecture=architecture) self.data_config = timm.data.resolve_data_config(pretrained_cfg, model=None, verbose=False) self.val_transforms = timm.data.create_transform(**self.data_config, is_training=False) # useful for training, see examples/pytorch/image-classification/run_image_classification.py self.train_transforms = timm.data.create_transform(**self.data_config, is_training=True) # If `ToTensor` is in the transforms, then the input should be numpy array or PIL image. # Otherwise, the input can be a tensor. In later timm versions, `MaybeToTensor` is used # which can handle both numpy arrays / PIL images and tensors. self._not_supports_tensor_input = any( transform.__class__.__name__ == "ToTensor" for transform in self.val_transforms.transforms ) def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. """ output = super().to_dict() output.pop("train_transforms", None) output.pop("val_transforms", None) output.pop("_not_supports_tensor_input", None) return output @classmethod def get_image_processor_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ Get the image processor dict for the model. """ image_processor_filename = kwargs.pop("image_processor_filename", "config.json") return super().get_image_processor_dict( pretrained_model_name_or_path, image_processor_filename=image_processor_filename, **kwargs ) def preprocess( self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]] = "pt", ) -> BatchFeature: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. 
""" if return_tensors != "pt": raise ValueError(f"return_tensors for TimmWrapperImageProcessor must be 'pt', but got {return_tensors}") if self._not_supports_tensor_input and isinstance(images, torch.Tensor): images = images.cpu().numpy() # If the input is a torch tensor, then no conversion is needed # Otherwise, we need to pass in a list of PIL images if isinstance(images, torch.Tensor): images = self.val_transforms(images) # Add batch dimension if a single image images = images.unsqueeze(0) if images.ndim == 3 else images else: images = make_list_of_images(images) images = [to_pil_image(image) for image in images] images = torch.stack([self.val_transforms(image) for image in images]) return BatchFeature({"pixel_values": images}, tensor_type=return_tensors) def save_pretrained(self, *args, **kwargs): # disable it to make checkpoint the same as in `timm` library. logger.warning_once( "The `save_pretrained` method is disabled for TimmWrapperImageProcessor. " "The image processor configuration is saved directly in `config.json` when " "`save_pretrained` is called for saving the model." ) __all__ = ["TimmWrapperImageProcessor"]
transformers/src/transformers/models/timm_wrapper/image_processing_timm_wrapper.py/0
{ "file_path": "transformers/src/transformers/models/timm_wrapper/image_processing_timm_wrapper.py", "repo_id": "transformers", "token_count": 2032 }
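A hedged usage sketch for the wrapper above: the checkpoint name is an assumption (any timm checkpoint whose `config.json` carries a `pretrained_cfg` should behave similarly), and only `return_tensors="pt"` is accepted, as enforced in `preprocess`.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

checkpoint = "timm/resnet18.a1_in1k"  # assumed example checkpoint
image_processor = AutoImageProcessor.from_pretrained(checkpoint)  # resolves to TimmWrapperImageProcessor
model = AutoModelForImageClassification.from_pretrained(checkpoint)

image = Image.new("RGB", (512, 384), (128, 128, 128))  # stand-in for a real photo
inputs = image_processor(image, return_tensors="pt")   # applies the timm eval transforms

with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (1, num_classes) for a classification checkpoint
```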
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for UDOP. """ from typing import List, Optional, Union from transformers import logging from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput logger = logging.get_logger(__name__) class UdopTextKwargs(TextKwargs, total=False): word_labels: Optional[Union[List[int], List[List[int]]]] boxes: Union[List[List[int]], List[List[List[int]]]] class UdopProcessorKwargs(ProcessingKwargs, total=False): text_kwargs: UdopTextKwargs _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "truncation": False, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_length": False, "verbose": True, }, "images_kwargs": {}, } class UdopProcessor(ProcessorMixin): r""" Constructs a UDOP processor which combines a LayoutLMv3 image processor and a UDOP tokenizer into a single processor. [`UdopProcessor`] offers all the functionalities you need to prepare data for the model. It first uses [`LayoutLMv3ImageProcessor`] to resize, rescale and normalize document images, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to [`UdopTokenizer`] or [`UdopTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token classification tasks (such as FUNSD, CORD). Additionally, it also supports passing `text_target` and `text_pair_target` to the tokenizer, which can be used to prepare labels for language modeling tasks. Args: image_processor (`LayoutLMv3ImageProcessor`): An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input. tokenizer (`UdopTokenizer` or `UdopTokenizerFast`): An instance of [`UdopTokenizer`] or [`UdopTokenizerFast`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "LayoutLMv3ImageProcessor" tokenizer_class = ("UdopTokenizer", "UdopTokenizerFast") # For backward compatibility. See transformers.processing_utils.ProcessorMixin.prepare_and_validate_optional_call_args for more details. optional_call_args = ["text_pair"] def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__( self, images: Optional[ImageInput] = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, # The following is to capture `text_pair` argument that may be passed as a positional argument. 
# See transformers.processing_utils.ProcessorMixin.prepare_and_validate_optional_call_args for more details, # or this conversation for more context: https://github.com/huggingface/transformers/pull/32544#discussion_r1720208116 # This behavior is only needed for backward compatibility and will be removed in future versions. # *args, audio=None, videos=None, **kwargs: Unpack[UdopProcessorKwargs], ) -> BatchFeature: """ This method first forwards the `images` argument to [`~UdopImageProcessor.__call__`]. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and bounding boxes along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set to `False`, it passes the words (`text`/``text_pair`) and `boxes` specified by the user along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared `pixel_values`. Alternatively, one can pass `text_target` and `text_pair_target` to prepare the targets of UDOP. Please refer to the docstring of the above two methods for more information. """ # verify input output_kwargs = self._merge_kwargs( UdopProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, **self.prepare_and_validate_optional_call_args(*args), ) boxes = output_kwargs["text_kwargs"].pop("boxes", None) word_labels = output_kwargs["text_kwargs"].pop("word_labels", None) text_pair = output_kwargs["text_kwargs"].pop("text_pair", None) return_overflowing_tokens = output_kwargs["text_kwargs"].get("return_overflowing_tokens", False) return_offsets_mapping = output_kwargs["text_kwargs"].get("return_offsets_mapping", False) text_target = output_kwargs["text_kwargs"].get("text_target", None) if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) if return_overflowing_tokens and not return_offsets_mapping: raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.") if text_target is not None: # use the processor to prepare the targets of UDOP return self.tokenizer( **output_kwargs["text_kwargs"], ) else: # use the processor to prepare the inputs of UDOP # first, apply the image processor features = self.image_processor(images=images, **output_kwargs["images_kwargs"]) features_words = features.pop("words", None) features_boxes = features.pop("boxes", None) output_kwargs["text_kwargs"].pop("text_target", None) output_kwargs["text_kwargs"].pop("text_pair_target", None) output_kwargs["text_kwargs"]["text_pair"] = text_pair output_kwargs["text_kwargs"]["boxes"] = boxes if boxes is not None else features_boxes output_kwargs["text_kwargs"]["word_labels"] = word_labels # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(text, str): text = [text] # add batch dimension (as the image processor always adds a batch dimension) output_kwargs["text_kwargs"]["text_pair"] = features_words encoded_inputs = self.tokenizer( text=text if text is not None else features_words, **output_kwargs["text_kwargs"], ) # add pixel values if return_overflowing_tokens is True: features["pixel_values"] = self.get_overflowing_images( features["pixel_values"], encoded_inputs["overflow_to_sample_mapping"] ) features.update(encoded_inputs) return features # Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.get_overflowing_images def get_overflowing_images(self, images, overflow_to_sample_mapping): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image images_with_overflow = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(images_with_overflow) != len(overflow_to_sample_mapping): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}" ) return images_with_overflow # Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.batch_decode def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.decode def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): return ["pixel_values", "input_ids", "bbox", "attention_mask"] __all__ = ["UdopProcessor"]
transformers/src/transformers/models/udop/processing_udop.py/0
{ "file_path": "transformers/src/transformers/models/udop/processing_udop.py", "repo_id": "transformers", "token_count": 3855 }
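A sketch of the typical call path for the processor above, assuming the `microsoft/udop-large` checkpoint and an image processor left at its default `apply_ocr=True` (which requires Tesseract/pytesseract to be installed). With OCR enabled, only the prompt text is passed and the words and boxes are extracted from the image.

```python
from PIL import Image
from transformers import UdopProcessor

processor = UdopProcessor.from_pretrained("microsoft/udop-large")  # assumed checkpoint

page = Image.new("RGB", (612, 792), "white")  # stand-in for a scanned document page

# With apply_ocr=True the OCR words become `text_pair` internally; `boxes` and
# `word_labels` must NOT be passed in this mode (see the ValueError checks in __call__ above).
encoding = processor(images=page, text="Question answering. What is the date?", return_tensors="pt")
print(sorted(encoding.keys()))  # e.g. ['attention_mask', 'bbox', 'input_ids', 'pixel_values']
```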
# coding=utf-8 # Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow Whisper model.""" from __future__ import annotations import math import random from typing import Dict, List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...generation.configuration_utils import GenerationConfig from ...generation.tf_logits_process import TFLogitsProcessorList from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_whisper import WhisperConfig from .tokenization_whisper import TASK_IDS, TO_LANGUAGE_CODE logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "WhisperConfig" LARGE_NEGATIVE = -1e8 def sinusoidal_embedding_init(shape, dtype=tf.float32) -> tf.Tensor: """Returns sinusoids for positional embedding""" length, channels = shape if channels % 2 != 0: raise ValueError( f"Number of channels has to be divisible by 2 for sinusoidal positional embeddings, got {channels} channels." 
) log_timescale_increment = math.log(10000) / (channels // 2 - 1) inv_timescales = tf.exp(-log_timescale_increment * tf.range(channels // 2, dtype=tf.float32)) scaled_time = tf.reshape(tf.range(length, dtype=tf.float32), (-1, 1)) * tf.reshape(inv_timescales, (1, -1)) return tf.cast(tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1), dtype) # Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill( (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFWhisperPositionalEmbedding(keras.layers.Layer): def __init__( self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, embedding_initializer=None, **kwargs, ): super().__init__(**kwargs) self.num_positions = num_positions self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.embedding_initializer = keras.initializers.get(embedding_initializer) def build(self, input_shape): self.weight = self.add_weight( name="weight", shape=[self.num_positions, self.embedding_dim], initializer=self.embedding_initializer, trainable=True, ) super().build(input_shape) def call(self, input_ids, past_key_values_length=0): past_key_values_length = tf.cast(past_key_values_length, tf.int32) gather_indices = tf.range(tf.shape(input_ids)[1], delta=1) + past_key_values_length return tf.gather(self.weight, gather_indices) class TFWhisperAttention(keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=False, name="k_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention._shape with BART->whisper def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention.call with BART->whisper def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = 
self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if 
getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextEncoderLayer with Speech2Text->Whisper class TFWhisperEncoderLayer(keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" ) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False ): """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)` """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return hidden_states, self_attn_weights def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.encoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) # Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextDecoderLayer with 
Speech2Text->Whisper class TFWhisperDecoderLayer(keras.layers.Layer): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFWhisperAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TFWhisperAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states, attention_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, cross_attn_layer_head_mask: tf.Tensor | None = None, past_key_value: Tuple[tf.Tensor] | None = None, training=False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`tf.Tensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(decoder_attention_heads,)` cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. 
`(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, training=training, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "encoder_attn", None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, "encoder_attn_layer_norm", None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFWhisperPreTrainedModel(TFPreTrainedModel): config_class = WhisperConfig base_model_prefix = "model" main_input_name = "input_features" def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor) -> int: """ Computes the output length of the convolutional layers """ input_lengths = 
(input_lengths - 1) // 2 + 1 return input_lengths @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. """ return { self.main_input_name: tf.random.uniform( [1, self.config.num_mel_bins, self.config.max_source_positions * 2 - 1], dtype=tf.float32 ), "decoder_input_ids": tf.constant([[1, 3]], dtype=tf.int32), } @property def input_signature(self): return { "input_features": tf.TensorSpec((None, self.config.num_mel_bins, None), tf.float32, name="input_features"), "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), } WHISPER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. Parameters: config ([`WhisperConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ WHISPER_INPUTS_DOCSTRING = r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @keras_serializable class TFWhisperEncoder(keras.layers.Layer): config_class = WhisperConfig """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TFWhisperEncoderLayer`]. 
Args: config: WhisperConfig embed_tokens (TFWhisperEmbedding): output embedding """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layerdrop = config.encoder_layerdrop self.embed_dim = config.d_model self.num_mel_bins = config.num_mel_bins self.padding_idx = config.pad_token_id self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(self.embed_dim) if config.scale_embedding else 1.0 # Padding is added in call() to match the PyTorch implementation self.conv1 = keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=1, padding="valid", name="conv1") self.conv2 = keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=2, padding="valid", name="conv2") self.embed_positions = TFWhisperPositionalEmbedding( num_positions=self.max_source_positions, embedding_dim=self.embed_dim, embedding_initializer=sinusoidal_embedding_init, name="embed_positions", ) self.embed_positions.trainable = False self.encoder_layers = [TFWhisperEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") self.dropout = keras.layers.Dropout(config.dropout) @unpack_inputs def call( self, input_features=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TF 2.0 layers can't use channels first format when running on CPU. 
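        # Shape bookkeeping for the block below: `input_features` arrives as
        # (batch, num_mel_bins, frames) and is moved to channels-last for Keras Conv1D.
        # Both convolutions use kernel_size=3 with an explicit pad of one frame on each
        # side; conv2 uses stride 2, halving the temporal length (to `max_source_positions`
        # for standard 30-second inputs) before the fixed sinusoidal position embeddings
        # are added.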
input_features = tf.transpose(input_features, perm=(0, 2, 1)) input_features = tf.pad(input_features, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = keras.activations.gelu(self.conv1(input_features)) inputs_embeds = tf.pad(inputs_embeds, [[0, 0], [1, 1], [0, 0]]) inputs_embeds = keras.activations.gelu(self.conv2(inputs_embeds)) inputs_embeds = tf.transpose(inputs_embeds, perm=(0, 1, 2)) embed_pos = self.embed_positions(input_ids=tf.zeros((1, self.max_source_positions), dtype=tf.int32)) hidden_states = inputs_embeds + embed_pos hidden_states = self.dropout(hidden_states, training=training) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.encoder_layers), message=( f"The head_mask should be specified for {len(self.encoder_layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." ), ) for idx, encoder_layer in enumerate(self.encoder_layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, None, layer_head_mask=(head_mask[idx] if head_mask is not None else None), training=training, ) if output_attentions: all_attentions += (attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build([None, None, self.num_mel_bins]) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build([None, None, self.embed_dim]) if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "encoder_layers", None) is not None: for layer in self.encoder_layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFWhisperDecoder(keras.layers.Layer): config_class = WhisperConfig """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TFWhisperDecoderLayer`] Args: config: WhisperConfig """ def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = keras.layers.Dropout(config.dropout) self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="embed_tokens", ) self.embed_positions = TFWhisperPositionalEmbedding( self.max_target_positions, config.d_model, name="embed_positions" ) self.decoder_layers = [TFWhisperDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] batch_size, seq_len = input_shape[0], input_shape[1] combined_attention_mask = tf.cond( tf.math.greater(seq_len, 1), lambda: _make_causal_mask(input_shape, past_key_values_length=past_key_values_length), lambda: _expand_mask(tf.ones((batch_size, seq_len + past_key_values_length)), tgt_len=seq_len), ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask @unpack_inputs def call( self, input_ids=None, attention_mask=None, position_ids=None, encoder_hidden_states=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = tf.shape(input_ids) input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = tf.shape(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) # embed positions filled_past_positions = past_key_values_length if position_ids is None else position_ids[0, -1] positions = self.embed_positions(input_ids, past_key_values_length=filled_past_positions) hidden_states = inputs_embeds + positions hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.decoder_layers), message=( f"The {attn_mask_name} should be specified for {len(self.decoder_layers)} layers, but it is" f" for {shape_list(attn_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.decoder_layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_value, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_tokens", None) is not None: with tf.name_scope(self.embed_tokens.name): self.embed_tokens.build(None) if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "decoder_layers", None) is not None: for layer in self.decoder_layers: with tf.name_scope(layer.name): layer.build(None) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) @keras_serializable class TFWhisperMainLayer(keras.layers.Layer): config_class = WhisperConfig def __init__(self, config: WhisperConfig, **kwargs): super().__init__(**kwargs) self.config = config self.encoder = TFWhisperEncoder(config, name="encoder") self.decoder = TFWhisperDecoder(config, name="decoder") def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, AutoFeatureExtractor >>> from datasets import load_dataset >>> model = 
TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_features, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): encoder_outputs = TFBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( "The bare Whisper Model outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) class TFWhisperModel(TFWhisperPreTrainedModel): def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def 
decoder(self): return self.model.decoder def encoder(self): return self.model.encoder @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features: TFModelInputType | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, decoder_position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, decoder_head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, decoder_inputs_embeds: Optional[Tuple[Union[np.ndarray, tf.Tensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]: r""" Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import TFWhisperModel, AutoFeatureExtractor >>> from datasets import load_dataset >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" outputs = self.model( input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with 
tf.name_scope(self.model.name): self.model.build(None) @add_start_docstrings( "The Whisper Model with a language modeling head. Can be used for automatic speech recognition.", WHISPER_START_DOCSTRING, ) class TFWhisperForConditionalGeneration(TFWhisperPreTrainedModel, TFCausalLanguageModelingLoss): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"encoder.version", r"decoder.version", r"proj_out.weight", ] _keys_to_ignore_on_save = [ r"proj_out.weight", ] def __init__(self, config: WhisperConfig, **kwargs): super().__init__(config, **kwargs) self.model = TFWhisperMainLayer(config, name="model") def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def resize_token_embeddings(self, new_num_tokens: int) -> keras.layers.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) return new_embeddings @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, input_features: TFModelInputType | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, decoder_position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, decoder_head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, decoder_inputs_embeds: Optional[Tuple[Union[np.ndarray, tf.Tensor]]] = None, labels: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoProcessor, TFWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="tf") >>> input_features = inputs.input_features >>> generated_ids = model.generate(input_features=input_features) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_features, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) decoder_last_hidden_state = outputs[0] # Decoder and encoder embeddings are tied lm_logits = tf.matmul(decoder_last_hidden_state, self.get_output_embeddings().weights, transpose_b=True) loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSeq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def generate( self, inputs: Optional[tf.Tensor] = None, generation_config: Optional[GenerationConfig] = None, logits_processor: Optional[TFLogitsProcessorList] = None, seed: Optional[List[int]] = None, return_timestamps: Optional[bool] = None, task: Optional[str] = None, language: Optional[str] = None, is_multilingual: Optional[bool] = None, prompt_ids: Optional[tf.Tensor] = None, return_token_timestamps=None, **kwargs, ): r""" Generates sequences of token ids for models with a language modeling head. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](../generation_strategies). </Tip> Parameters: inputs (`tf.Tensor` of varying shape depending on the modality, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If unset the method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of `input_ids`, `input_values`, `input_features`, or `pixel_values`. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. 
Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. If a logit processor is passed that is already created with the arguments or a generation config an error is thrown. This feature is intended for advanced users. seed (`List[int]`, *optional*): Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the `seed` argument from stateless functions in `tf.random`. return_timestamps (`bool`, *optional*): Whether to return the timestamps with the text. This enables the `TFWhisperTimestampsLogitsProcessor`. task (`str`, *optional*): Task to use for generation, either "translate" or "transcribe". The `model.config.forced_decoder_ids` will be updated accordingly. language (`str`, *optional*): Language token to use for generation, can be either in the form of `<|en|>`, `en` or `english`. You can find all the possible language tokens in the `model.generation_config.lang_to_id` dictionary. is_multilingual (`bool`, *optional*): Whether or not the model is multilingual. prompt_ids (`tf.Tensor`, *optional*): Rank-1 tensor of token IDs created by passing text to [`~WhisperProcessor.get_prompt_ids`] that is provided as a prompt to each chunk. This can be used to provide or "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns to make it more likely to predict those words correctly. It cannot be used in conjunction with `decoder_start_token_id` as it overwrites this value. return_token_timestamps (`bool`, *optional*): Whether to return token-level timestamps with the text. This can be used with or without the `return_timestamps` option. To get word-level timestamps, use the tokenizer to group the tokens into words. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `tf.Tensor`. If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchDecoderOnlyOutput`], - [`~generation.TFSampleDecoderOnlyOutput`], - [`~generation.TFBeamSearchDecoderOnlyOutput`], - [`~generation.TFBeamSampleDecoderOnlyOutput`] If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.TFGreedySearchEncoderDecoderOutput`], - [`~generation.TFSampleEncoderDecoderOutput`], - [`~generation.TFBeamSearchEncoderDecoderOutput`], - [`~generation.TFBeamSampleEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config if return_timestamps is not None: if not hasattr(generation_config, "no_timestamps_token_id"): raise ValueError( "You are trying to return timestamps, but the generation config is not properly set. 
" "Make sure to initialize the generation config with the correct attributes that are needed such as `no_timestamps_token_id`. " "For more details on how to generate the approtiate config, refer to https://github.com/huggingface/transformers/issues/21878#issuecomment-1451902363" ) generation_config.return_timestamps = return_timestamps else: generation_config.return_timestamps = False if language is not None: language = language.lower() generation_config.language = language if task is not None: generation_config.task = task forced_decoder_ids = None # Legacy code for backward compatibility if hasattr(self.config, "forced_decoder_ids") and self.config.forced_decoder_ids is not None: forced_decoder_ids = self.config.forced_decoder_ids elif ( hasattr(self.generation_config, "forced_decoder_ids") and self.generation_config.forced_decoder_ids is not None ): forced_decoder_ids = self.generation_config.forced_decoder_ids else: forced_decoder_ids = kwargs.get("forced_decoder_ids", None) if task is not None or language is not None or (forced_decoder_ids is None and prompt_ids is not None): forced_decoder_ids = [] if hasattr(generation_config, "language"): if generation_config.language in generation_config.lang_to_id.keys(): language_token = generation_config.language elif generation_config.language in TO_LANGUAGE_CODE.keys(): language_token = f"<|{TO_LANGUAGE_CODE[generation_config.language]}|>" elif generation_config.language in TO_LANGUAGE_CODE.values(): language_token = f"<|{generation_config.language}|>" else: is_language_code = len(generation_config.language) == 2 raise ValueError( f"Unsupported language: {generation_config.language}. Language should be one of:" f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}." ) if language_token not in generation_config.lang_to_id: raise ValueError( f"{language_token} is not supported by this specific model as it is not in the `generation_config.lang_to_id`." "(You should just add it to the generation config)" ) forced_decoder_ids.append((1, generation_config.lang_to_id[language_token])) else: forced_decoder_ids.append((1, None)) # automatically detect the language if hasattr(generation_config, "task"): if generation_config.task in TASK_IDS: forced_decoder_ids.append((2, generation_config.task_to_id[generation_config.task])) else: raise ValueError( f"The `{generation_config.task}`task is not supported. The task should be one of `{TASK_IDS}`" ) elif hasattr(generation_config, "task_to_id"): forced_decoder_ids.append((2, generation_config.task_to_id["transcribe"])) # defaults to transcribe if hasattr(generation_config, "no_timestamps_token_id") and not generation_config.return_timestamps: idx = forced_decoder_ids[-1][0] + 1 if forced_decoder_ids else 1 forced_decoder_ids.append((idx, generation_config.no_timestamps_token_id)) if forced_decoder_ids is not None: generation_config.forced_decoder_ids = forced_decoder_ids if prompt_ids is not None: if kwargs.get("decoder_start_token_id") is not None: raise ValueError( "When specifying `prompt_ids`, you cannot also specify `decoder_start_token_id` as it gets overwritten." 
) prompt_ids = prompt_ids.tolist() decoder_start_token_id, *text_prompt_ids = prompt_ids # Slicing the text prompt ids in a manner consistent with the OpenAI implementation # to accommodate context space for the prefix (see https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/decoding.py#L599) text_prompt_ids = text_prompt_ids[-self.config.max_length // 2 - 1 :] # Set the decoder_start_token_id to <|startofprev|> kwargs.update({"decoder_start_token_id": decoder_start_token_id}) # Update the max generation length to include the prompt specified_max_length = kwargs.pop("max_new_tokens", None) or kwargs.pop("max_length", None) default_max_length = generation_config.max_new_tokens or generation_config.max_length non_prompt_max_length = specified_max_length or default_max_length kwargs["max_new_tokens"] = non_prompt_max_length + len(text_prompt_ids) # Reformat the forced_decoder_ids to incorporate the prompt non_prompt_forced_decoder_ids = ( kwargs.pop("forced_decoder_ids", None) or generation_config.forced_decoder_ids ) forced_decoder_ids = [ *text_prompt_ids, generation_config.decoder_start_token_id, *[token for _rank, token in non_prompt_forced_decoder_ids], ] forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_decoder_ids)] generation_config.forced_decoder_ids = forced_decoder_ids # TODO: Implement `WhisperTimeStampLogitsProcessor`. if generation_config.return_timestamps: # logits_processor = [TFWhisperTimeStampLogitsProcessor(generation_config)] raise ValueError("`TFWhisperForConditionalGeneration` doesn't support returning the timestamps yet.") if return_token_timestamps: kwargs["output_attentions"] = True kwargs["return_dict_in_generate"] = True if getattr(generation_config, "task", None) == "translate": logger.warning("Token-level timestamps may not be reliable for task 'translate'.") if not hasattr(generation_config, "alignment_heads"): raise ValueError( "Model generation config has no `alignment_heads`, token-level timestamps not available. " "See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config." 
) outputs = super().generate( inputs, generation_config, logits_processor, **kwargs, ) if return_token_timestamps and hasattr(generation_config, "alignment_heads"): outputs["token_timestamps"] = self._extract_token_timestamps(outputs, generation_config.alignment_heads) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput( logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, use_cache=None, encoder_outputs=None, attention_mask=None, decoder_attention_mask=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_attention_mask is not None: # xla decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] elif past_key_values is not None: # no xla + past decoder_position_ids = past_key_values[0][0].shape[2] else: # no xla + no past decoder_position_ids = tf.range(decoder_input_ids.shape[1]) decoder_position_ids = tf.broadcast_to(decoder_position_ids, decoder_input_ids.shape) return { "input_features": None, # Needs to be passed to make Keras.layer.__call__ happy "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "use_cache": use_cache, "decoder_attention_mask": decoder_attention_mask, "decoder_position_ids": decoder_position_ids, } def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) __all__ = ["TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel"]
transformers/src/transformers/models/whisper/modeling_tf_whisper.py/0
{ "file_path": "transformers/src/transformers/models/whisper/modeling_tf_whisper.py", "repo_id": "transformers", "token_count": 37373 }
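The `generate()` override in the Whisper file above mainly turns the `language`, `task` and timestamp arguments into `forced_decoder_ids` before delegating to the generic TF generation loop. Below is a minimal usage sketch, assuming a multilingual checkpoint such as `openai/whisper-tiny` whose generation config provides the `lang_to_id`/`task_to_id` maps; the checkpoint name and audio sample are illustrative, not prescribed by the source.

```python
from datasets import load_dataset
from transformers import AutoProcessor, TFWhisperForConditionalGeneration

processor = AutoProcessor.from_pretrained("openai/whisper-tiny")
model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

# Short English speech sample commonly used in the library's doctests
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="tf")

# `language` and `task` are resolved to `forced_decoder_ids` inside `generate`,
# as shown in the method body above
generated_ids = model.generate(
    inputs.input_features, language="en", task="transcribe", max_new_tokens=64
)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```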
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert YOLOS checkpoints from the original repository. URL: https://github.com/hustvl/YOLOS""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_yolos_config(yolos_name: str) -> YolosConfig: config = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: config.hidden_size = 192 config.intermediate_size = 768 config.num_hidden_layers = 12 config.num_attention_heads = 3 config.image_size = [800, 1333] config.use_mid_position_embeddings = False elif yolos_name == "yolos_s_dWr": config.hidden_size = 330 config.num_hidden_layers = 14 config.num_attention_heads = 6 config.intermediate_size = 1320 elif "yolos_s" in yolos_name: config.hidden_size = 384 config.intermediate_size = 1536 config.num_hidden_layers = 12 config.num_attention_heads = 6 elif "yolos_b" in yolos_name: config.image_size = [800, 1344] config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def rename_key(name: str) -> str: if "backbone" in name: name = name.replace("backbone", "vit") if "cls_token" in name: name = name.replace("cls_token", "embeddings.cls_token") if "det_token" in name: name = name.replace("det_token", "embeddings.detection_tokens") if "mid_pos_embed" in name: name = name.replace("mid_pos_embed", 
"encoder.mid_position_embeddings") if "pos_embed" in name: name = name.replace("pos_embed", "embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "blocks" in name: name = name.replace("blocks", "encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "class_embed" in name: name = name.replace("class_embed", "class_labels_classifier") if "bbox_embed" in name: name = name.replace("bbox_embed", "bbox_predictor") if "vit.norm" in name: name = name.replace("vit.norm", "vit.layernorm") return name def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict: for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[2]) dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2] orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:] else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on an image of cute cats def prepare_img() -> torch.Tensor: url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_yolos_checkpoint( yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False ): """ Copy/paste/tweak model's weights to our YOLOS structure. 
""" config = get_yolos_config(yolos_name) # load original state_dict state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] # load 🤗 model model = YolosForObjectDetection(config) model.eval() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # Check outputs on an image, prepared by YolosImageProcessor size = 800 if yolos_name != "yolos_ti" else 512 image_processor = YolosImageProcessor(format="coco_detection", size=size) encoding = image_processor(images=prepare_img(), return_tensors="pt") outputs = model(**encoding) logits, pred_boxes = outputs.logits, outputs.pred_boxes expected_slice_logits, expected_slice_boxes = None, None if yolos_name == "yolos_ti": expected_slice_logits = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) expected_slice_boxes = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": expected_slice_logits = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) expected_slice_boxes = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": expected_slice_logits = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) expected_slice_boxes = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": expected_slice_logits = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) expected_slice_boxes = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": expected_slice_logits = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) expected_slice_boxes = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}") assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4) assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_mapping = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub...") model_name = model_mapping[yolos_name] image_processor.push_to_hub(model_name, organization="hustvl") model.push_to_hub(model_name, organization="hustvl") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." 
) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/yolos/convert_yolos_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/yolos/convert_yolos_to_pytorch.py", "repo_id": "transformers", "token_count": 5081 }
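The YOLOS conversion entry point above can also be driven directly from Python rather than through `argparse`. This is a rough sketch, assuming a source install where the conversion module is importable and an original `.pth` checkpoint from the YOLOS repository; all paths are placeholders.

```python
from transformers.models.yolos.convert_yolos_to_pytorch import convert_yolos_checkpoint

# Converts the original weights, runs the logits/boxes sanity checks on the COCO cats
# image, and writes a Hugging Face-format model and image processor to the output folder.
convert_yolos_checkpoint(
    yolos_name="yolos_s_200_pre",
    checkpoint_path="/path/to/yolos_s_200_pre.pth",
    pytorch_dump_folder_path="/tmp/yolos-small-converted",
    push_to_hub=False,
)
```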
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ZoeDepth model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING logger = logging.get_logger(__name__) ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP = { "Intel/zoedepth-nyu": "https://huggingface.co/Intel/zoedepth-nyu/resolve/main/config.json", } class ZoeDepthConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ZoeDepthForDepthEstimation`]. It is used to instantiate an ZoeDepth model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ZoeDepth [Intel/zoedepth-nyu](https://huggingface.co/Intel/zoedepth-nyu) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*, defaults to `BeitConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. batch_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the batch normalization layers. readout_type (`str`, *optional*, defaults to `"project"`): The readout type to use when processing the readout token (CLS token) of the intermediate hidden states of the ViT backbone. Can be one of [`"ignore"`, `"add"`, `"project"`]. - "ignore" simply ignores the CLS token. - "add" passes the information from the CLS token to all other tokens by adding the representations. - "project" passes information to the other tokens by concatenating the readout to all other tokens before projecting the representation to the original feature dimension D using a linear layer followed by a GELU non-linearity. 
reassemble_factors (`List[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`): The up/downsampling factors of the reassemble layers. neck_hidden_sizes (`List[str]`, *optional*, defaults to `[96, 192, 384, 768]`): The hidden sizes to project to for the feature maps of the backbone. fusion_hidden_size (`int`, *optional*, defaults to 256): The number of channels before fusion. head_in_index (`int`, *optional*, defaults to -1): The index of the features to use in the heads. use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`): Whether to use batch normalization in the pre-activate residual units of the fusion blocks. use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`): Whether to use bias in the pre-activate residual units of the fusion blocks. num_relative_features (`int`, *optional*, defaults to 32): The number of features to use in the relative depth estimation head. add_projection (`bool`, *optional*, defaults to `False`): Whether to add a projection layer before the depth estimation head. bottleneck_features (`int`, *optional*, defaults to 256): The number of features in the bottleneck layer. num_attractors (`List[int], *optional*, defaults to `[16, 8, 4, 1]`): The number of attractors to use in each stage. bin_embedding_dim (`int`, *optional*, defaults to 128): The dimension of the bin embeddings. attractor_alpha (`int`, *optional*, defaults to 1000): The alpha value to use in the attractor. attractor_gamma (`int`, *optional*, defaults to 2): The gamma value to use in the attractor. attractor_kind (`str`, *optional*, defaults to `"mean"`): The kind of attractor to use. Can be one of [`"mean"`, `"sum"`]. min_temp (`float`, *optional*, defaults to 0.0212): The minimum temperature value to consider. max_temp (`float`, *optional*, defaults to 50.0): The maximum temperature value to consider. bin_centers_type (`str`, *optional*, defaults to `"softplus"`): Activation type used for bin centers. Can be "normed" or "softplus". For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. bin_configurations (`List[dict]`, *optional*, defaults to `[{'n_bins': 64, 'min_depth': 0.001, 'max_depth': 10.0}]`): Configuration for each of the bin heads. Each configuration should consist of the following keys: - name (`str`): The name of the bin head - only required in case of multiple bin configurations. - `n_bins` (`int`): The number of bins to use. - `min_depth` (`float`): The minimum depth value to consider. - `max_depth` (`float`): The maximum depth value to consider. In case only a single configuration is passed, the model will use a single head with the specified configuration. In case multiple configurations are passed, the model will use multiple heads with the specified configurations. num_patch_transformer_layers (`int`, *optional*): The number of transformer layers to use in the patch transformer. Only used in case of multiple bin configurations. patch_transformer_hidden_size (`int`, *optional*): The hidden size to use in the patch transformer. Only used in case of multiple bin configurations. patch_transformer_intermediate_size (`int`, *optional*): The intermediate size to use in the patch transformer. Only used in case of multiple bin configurations. patch_transformer_num_attention_heads (`int`, *optional*): The number of attention heads to use in the patch transformer. Only used in case of multiple bin configurations. 
Example: ```python >>> from transformers import ZoeDepthConfig, ZoeDepthForDepthEstimation >>> # Initializing a ZoeDepth zoedepth-large style configuration >>> configuration = ZoeDepthConfig() >>> # Initializing a model from the zoedepth-large style configuration >>> model = ZoeDepthForDepthEstimation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "zoedepth" def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, backbone_kwargs=None, hidden_act="gelu", initializer_range=0.02, batch_norm_eps=1e-05, readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_bias_in_fusion_residual=None, num_relative_features=32, add_projection=False, bottleneck_features=256, num_attractors=[16, 8, 4, 1], bin_embedding_dim=128, attractor_alpha=1000, attractor_gamma=2, attractor_kind="mean", min_temp=0.0212, max_temp=50.0, bin_centers_type="softplus", bin_configurations=[{"n_bins": 64, "min_depth": 0.001, "max_depth": 10.0}], num_patch_transformer_layers=None, patch_transformer_hidden_size=None, patch_transformer_intermediate_size=None, patch_transformer_num_attention_heads=None, **kwargs, ): super().__init__(**kwargs) if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']") if attractor_kind not in ["mean", "sum"]: raise ValueError("Attractor_kind must be one of ['mean', 'sum']") if use_pretrained_backbone: raise ValueError("Pretrained backbones are not supported yet.") if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `BEiT` backbone.") backbone_config = CONFIG_MAPPING["beit"]( image_size=384, num_hidden_layers=24, hidden_size=1024, intermediate_size=4096, num_attention_heads=16, use_relative_position_bias=True, reshape_hidden_states=False, out_features=["stage6", "stage12", "stage18", "stage24"], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") self.backbone_config = backbone_config self.backbone = backbone self.hidden_act = hidden_act self.use_pretrained_backbone = use_pretrained_backbone self.initializer_range = initializer_range self.batch_norm_eps = batch_norm_eps self.readout_type = readout_type self.reassemble_factors = reassemble_factors self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.head_in_index = head_in_index self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual self.use_bias_in_fusion_residual = use_bias_in_fusion_residual self.num_relative_features = num_relative_features self.add_projection = add_projection self.bottleneck_features = bottleneck_features self.num_attractors = num_attractors self.bin_embedding_dim = bin_embedding_dim self.attractor_alpha = attractor_alpha self.attractor_gamma = attractor_gamma self.attractor_kind = attractor_kind self.min_temp = min_temp self.max_temp = max_temp self.bin_centers_type = bin_centers_type self.bin_configurations = bin_configurations self.num_patch_transformer_layers = num_patch_transformer_layers self.patch_transformer_hidden_size = patch_transformer_hidden_size self.patch_transformer_intermediate_size = patch_transformer_intermediate_size self.patch_transformer_num_attention_heads = patch_transformer_num_attention_heads __all__ = ["ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP", "ZoeDepthConfig"]
transformers/src/transformers/models/zoedepth/configuration_zoedepth.py/0
{ "file_path": "transformers/src/transformers/models/zoedepth/configuration_zoedepth.py", "repo_id": "transformers", "token_count": 4838 }
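The ZoeDepth configuration docstring above explains that passing several `bin_configurations` switches the model to a multi-head setup, which in turn requires the patch-transformer hyperparameters. A small sketch of such a configuration follows; the numeric values are illustrative assumptions, not a claim about the released `Intel/zoedepth-nyu-kitti` settings.

```python
from transformers import ZoeDepthConfig, ZoeDepthForDepthEstimation

config = ZoeDepthConfig(
    bin_configurations=[
        {"name": "nyu", "n_bins": 64, "min_depth": 1e-3, "max_depth": 10.0},  # indoor head
        {"name": "kitti", "n_bins": 64, "min_depth": 1e-3, "max_depth": 80.0},  # outdoor head
    ],
    # Only needed when more than one bin configuration is passed
    num_patch_transformer_layers=4,
    patch_transformer_hidden_size=128,
    patch_transformer_intermediate_size=1024,
    patch_transformer_num_attention_heads=4,
)

# Randomly initialized model with the default BEiT backbone; no pretrained weights are loaded.
model = ZoeDepthForDepthEstimation(config)
print(config.bin_configurations)
```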
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import csv import importlib import json import os import pickle import sys import traceback import types import warnings from abc import ABC, abstractmethod from collections import UserDict from contextlib import contextmanager from os.path import abspath, exists from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from ..dynamic_module_utils import custom_object_save from ..feature_extraction_utils import PreTrainedFeatureExtractor from ..image_processing_utils import BaseImageProcessor from ..modelcard import ModelCard from ..models.auto import AutoConfig, AutoTokenizer from ..processing_utils import ProcessorMixin from ..tokenization_utils import PreTrainedTokenizer from ..utils import ( ModelOutput, PushToHubMixin, add_end_docstrings, copy_func, infer_framework, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xpu_available, logging, ) GenericTensor = Union[List["GenericTensor"], "torch.Tensor", "tf.Tensor"] if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TFAutoModel if is_torch_available(): import torch from torch.utils.data import DataLoader, Dataset from ..modeling_utils import PreTrainedModel from ..models.auto.modeling_auto import AutoModel # Re-export for backward compatibility from .pt_utils import KeyDataset else: Dataset = None KeyDataset = None if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel logger = logging.get_logger(__name__) def no_collate_fn(items): if len(items) != 1: raise ValueError("This collate_fn is meant to be used with batch_size=1") return items[0] def _pad(items, key, padding_value, padding_side): batch_size = len(items) if isinstance(items[0][key], torch.Tensor): # Others include `attention_mask` etc... 
shape = items[0][key].shape dim = len(shape) if dim == 1: # We have a list of 1-dim torch tensors, which can be stacked without padding return torch.cat([item[key] for item in items], dim=0) if key in ["pixel_values", "image"]: # This is probably an image, so padding shouldn't be necessary # B, C, H, W return torch.cat([item[key] for item in items], dim=0) elif dim == 4 and key == "input_features": # this is probably a batched mel spectrogram return torch.cat([item[key] for item in items], dim=0) max_length = max(item[key].shape[1] for item in items) min_length = min(item[key].shape[1] for item in items) dtype = items[0][key].dtype if dim == 2: if max_length == min_length: # Bypass for `ImageGPT` which doesn't provide a padding value, yet # we can consistently pad since the size should be matching return torch.cat([item[key] for item in items], dim=0) tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value elif dim == 3: tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value elif dim == 4: tensor = torch.zeros((batch_size, max_length, shape[-2], shape[-1]), dtype=dtype) + padding_value for i, item in enumerate(items): if dim == 2: if padding_side == "left": tensor[i, -len(item[key][0]) :] = item[key][0].clone() else: tensor[i, : len(item[key][0])] = item[key][0].clone() elif dim == 3: if padding_side == "left": tensor[i, -len(item[key][0]) :, :] = item[key][0].clone() else: tensor[i, : len(item[key][0]), :] = item[key][0].clone() elif dim == 4: if padding_side == "left": tensor[i, -len(item[key][0]) :, :, :] = item[key][0].clone() else: tensor[i, : len(item[key][0]), :, :] = item[key][0].clone() return tensor else: return [item[key] for item in items] def pad_collate_fn(tokenizer, feature_extractor): # Tokenizer t_padding_side = None # Feature extractor f_padding_side = None if tokenizer is None and feature_extractor is None: raise ValueError("Pipeline without tokenizer or feature_extractor cannot do batching") if tokenizer is not None: if tokenizer.pad_token_id is None: raise ValueError( "Pipeline with tokenizer without pad_token cannot do batching. You can try to set it with " "`pipe.tokenizer.pad_token_id = model.config.eos_token_id`." ) else: t_padding_value = tokenizer.pad_token_id t_padding_side = tokenizer.padding_side if feature_extractor is not None: # Feature extractor can be images, where no padding is expected f_padding_value = getattr(feature_extractor, "padding_value", None) f_padding_side = getattr(feature_extractor, "padding_side", None) if t_padding_side is not None and f_padding_side is not None and t_padding_side != f_padding_side: raise ValueError( f"The feature extractor and tokenizer don't agree on the padding side: {t_padding_side} != {f_padding_side}" ) padding_side = "right" if t_padding_side is not None: padding_side = t_padding_side if f_padding_side is not None: padding_side = f_padding_side def inner(items): keys = set(items[0].keys()) for item in items: if set(item.keys()) != keys: raise ValueError( f"The elements of the batch contain different keys. Cannot batch them ({set(item.keys())} !=" f" {keys})" ) # input_values, input_pixels, input_ids, ...
padded = {} for key in keys: if key in {"input_ids"}: # ImageGPT uses a feature extractor if tokenizer is None and feature_extractor is not None: _padding_value = f_padding_value else: _padding_value = t_padding_value elif key in {"input_values", "pixel_values", "input_features"}: _padding_value = f_padding_value elif key in {"p_mask", "special_tokens_mask"}: _padding_value = 1 elif key in {"attention_mask", "token_type_ids"}: _padding_value = 0 else: # This is likely another random key maybe even user provided _padding_value = 0 padded[key] = _pad(items, key, _padding_value, padding_side) return padded return inner def infer_framework_load_model( model, config: AutoConfig, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. config ([`AutoConfig`]): The config associated with the model to help using the correct class model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) if isinstance(model, str): model_kwargs["_from_pipeline"] = task class_tuple = () look_pt = is_torch_available() and framework in {"pt", None} look_tf = is_tf_available() and framework in {"tf", None} if model_classes: if look_pt: class_tuple = class_tuple + model_classes.get("pt", (AutoModel,)) if look_tf: class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,)) if config.architectures: classes = [] for architecture in config.architectures: transformers_module = importlib.import_module("transformers") if look_pt: _class = getattr(transformers_module, architecture, None) if _class is not None: classes.append(_class) if look_tf: _class = getattr(transformers_module, f"TF{architecture}", None) if _class is not None: classes.append(_class) class_tuple = class_tuple + tuple(classes) if len(class_tuple) == 0: raise ValueError(f"Pipeline cannot infer suitable model classes from {model}") all_traceback = {} for model_class in class_tuple: kwargs = model_kwargs.copy() if framework == "pt" and model.endswith(".h5"): kwargs["from_tf"] = True logger.warning( "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. " "Trying to load the model with PyTorch." 
) elif framework == "tf" and model.endswith(".bin"): kwargs["from_pt"] = True logger.warning( "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. " "Trying to load the model with Tensorflow." ) try: model = model_class.from_pretrained(model, **kwargs) if hasattr(model, "eval"): model = model.eval() # Stop loading on the first successful load. break except (OSError, ValueError): all_traceback[model_class.__name__] = traceback.format_exc() continue if isinstance(model, str): error = "" for class_name, trace in all_traceback.items(): error += f"while loading with {class_name}, an error is thrown:\n{trace}\n" raise ValueError( f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n" ) if framework is None: framework = infer_framework(model.__class__) return framework, model def infer_framework_from_model( model, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs) else: config = model.config return infer_framework_load_model( model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs ) def get_framework(model, revision: Optional[str] = None): """ Select framework (TensorFlow or PyTorch) to use. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): If both frameworks are installed, picks the one corresponding to the model passed (either a model class or the model name). If no specific model is provided, defaults to using PyTorch. """ warnings.warn( "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.", FutureWarning, ) if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." 
) if isinstance(model, str): if is_torch_available() and not is_tf_available(): model = AutoModel.from_pretrained(model, revision=revision) elif is_tf_available() and not is_torch_available(): model = TFAutoModel.from_pretrained(model, revision=revision) else: try: model = AutoModel.from_pretrained(model, revision=revision) except OSError: model = TFAutoModel.from_pretrained(model, revision=revision) framework = infer_framework(model.__class__) return framework def get_default_model_and_revision( targeted_task: Dict, framework: Optional[str], task_options: Optional[Any] ) -> Union[str, Tuple[str, str]]: """ Select a default model to use for a given task. Defaults to pytorch if ambiguous. Args: targeted_task (`Dict`): Dictionary representing the given task, that should contain default models framework (`str`, None) "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. task_options (`Any`, None) Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for translation task. Returns `str` The model string representing the default model for this pipeline """ if is_torch_available() and not is_tf_available(): framework = "pt" elif is_tf_available() and not is_torch_available(): framework = "tf" defaults = targeted_task["default"] if task_options: if task_options not in defaults: raise ValueError(f"The task does not provide any default models for options {task_options}") default_models = defaults[task_options]["model"] elif "model" in defaults: default_models = targeted_task["default"]["model"] else: # XXX This error message needs to be updated to be more generic if more tasks are going to become # parametrized raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"') if framework is None: framework = "pt" return default_models[framework] def load_assistant_model( model: "PreTrainedModel", assistant_model: Optional[Union[str, "PreTrainedModel"]], assistant_tokenizer: Optional[PreTrainedTokenizer], ) -> Tuple[Optional["PreTrainedModel"], Optional[PreTrainedTokenizer]]: """ Prepares the assistant model and the assistant tokenizer for a pipeline whose model that can call `generate`. Args: model ([`PreTrainedModel`]): The main model that will be used by the pipeline to make predictions. assistant_model (`str` or [`PreTrainedModel`], *optional*): The assistant model that will be used by the pipeline to make predictions. assistant_tokenizer ([`PreTrainedTokenizer`], *optional*): The assistant tokenizer that will be used by the pipeline to encode data for the model. Returns: Tuple: The loaded assistant model and (optionally) the loaded tokenizer. """ if not model.can_generate() or assistant_model is None: return None, None if getattr(model, "framework") != "pt" or not isinstance(model, PreTrainedModel): raise ValueError( "Assisted generation, triggered by the `assistant_model` argument, is only available for " "`PreTrainedModel` model instances. For instance, TF or JAX models are not supported." 
) # If the model is passed as a string, load the model and the corresponding tokenizer if isinstance(assistant_model, str): assistant_config = AutoConfig.from_pretrained(assistant_model) _, loaded_assistant_model = infer_framework_load_model(assistant_model, config=assistant_config) loaded_assistant_model = loaded_assistant_model.to(device=model.device, dtype=model.dtype) loaded_assistant_tokenizer = AutoTokenizer.from_pretrained(assistant_model) else: loaded_assistant_model = assistant_model loaded_assistant_tokenizer = assistant_tokenizer # Finally, let's check the tokenizers: if the two models have different tokenizers, we need to keep the assistant # tokenizer same_vocab_size = model.config.vocab_size == loaded_assistant_model.config.vocab_size same_special_tokens = all( getattr(model.config, token) == getattr(loaded_assistant_model.config, token) for token in ("eos_token_id", "pad_token_id", "bos_token_id") ) if same_vocab_size and same_special_tokens: loaded_assistant_tokenizer = None elif loaded_assistant_tokenizer is None: raise ValueError( "The assistant model has a different tokenizer than the main model. You should pass the assistant " "tokenizer." ) return loaded_assistant_model, loaded_assistant_tokenizer class PipelineException(Exception): """ Raised by a [`Pipeline`] when handling __call__. Args: task (`str`): The task of the pipeline. model (`str`): The model used by the pipeline. reason (`str`): The error message to display. """ def __init__(self, task: str, model: str, reason: str): super().__init__(reason) self.task = task self.model = model class ArgumentHandler(ABC): """ Base interface for handling arguments for each [`~pipelines.Pipeline`]. """ @abstractmethod def __call__(self, *args, **kwargs): raise NotImplementedError() class PipelineDataFormat: """ Base class for all the pipeline supported data format both for reading and writing. Supported data formats currently includes: - JSON - CSV - stdin/stdout (pipe) `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format. Args: output_path (`str`): Where to save the outgoing data. input_path (`str`): Where to look for the input data. column (`str`): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. """ SUPPORTED_FORMATS = ["json", "csv", "pipe"] def __init__( self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite: bool = False, ): self.output_path = output_path self.input_path = input_path self.column = column.split(",") if column is not None else [""] self.is_multi_columns = len(self.column) > 1 if self.is_multi_columns: self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column] if output_path is not None and not overwrite: if exists(abspath(self.output_path)): raise OSError(f"{self.output_path} already exists on disk") if input_path is not None: if not exists(abspath(self.input_path)): raise OSError(f"{self.input_path} doesnt exist on disk") @abstractmethod def __iter__(self): raise NotImplementedError() @abstractmethod def save(self, data: Union[dict, List[dict]]): """ Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. Args: data (`dict` or list of `dict`): The data to store. 
""" raise NotImplementedError() def save_binary(self, data: Union[dict, List[dict]]) -> str: """ Save the provided data object as a pickle-formatted binary data on the disk. Args: data (`dict` or list of `dict`): The data to store. Returns: `str`: Path where the data has been saved. """ path, _ = os.path.splitext(self.output_path) binary_path = os.path.extsep.join((path, "pickle")) with open(binary_path, "wb+") as f_output: pickle.dump(data, f_output) return binary_path @staticmethod def from_str( format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False, ) -> "PipelineDataFormat": """ Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`. Args: format (`str`): The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`. output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. Returns: [`~pipelines.PipelineDataFormat`]: The proper data format. """ if format == "json": return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) elif format == "csv": return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) elif format == "pipe": return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) else: raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)") class CsvPipelineDataFormat(PipelineDataFormat): """ Support for pipelines using CSV data format. Args: output_path (`str`): Where to save the outgoing data. input_path (`str`): Where to look for the input data. column (`str`): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. """ def __init__( self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False, ): super().__init__(output_path, input_path, column, overwrite=overwrite) def __iter__(self): with open(self.input_path, "r") as f: reader = csv.DictReader(f) for row in reader: if self.is_multi_columns: yield {k: row[c] for k, c in self.column} else: yield row[self.column[0]] def save(self, data: List[dict]): """ Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. Args: data (`List[dict]`): The data to store. """ with open(self.output_path, "w") as f: if len(data) > 0: writer = csv.DictWriter(f, list(data[0].keys())) writer.writeheader() writer.writerows(data) class JsonPipelineDataFormat(PipelineDataFormat): """ Support for pipelines using JSON file format. Args: output_path (`str`): Where to save the outgoing data. input_path (`str`): Where to look for the input data. column (`str`): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. """ def __init__( self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False, ): super().__init__(output_path, input_path, column, overwrite=overwrite) with open(input_path, "r") as f: self._entries = json.load(f) def __iter__(self): for entry in self._entries: if self.is_multi_columns: yield {k: entry[c] for k, c in self.column} else: yield entry[self.column[0]] def save(self, data: dict): """ Save the provided data object in a json file. 
Args: data (`dict`): The data to store. """ with open(self.output_path, "w") as f: json.dump(data, f) class PipedPipelineDataFormat(PipelineDataFormat): """ Read data from piped input to the python process. For multi columns data, columns should separated by \t If columns are provided, then the output will be a dictionary with {column_x: value_x} Args: output_path (`str`): Where to save the outgoing data. input_path (`str`): Where to look for the input data. column (`str`): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. """ def __iter__(self): for line in sys.stdin: # Split for multi-columns if "\t" in line: line = line.split("\t") if self.column: # Dictionary to map arguments yield {kwargs: l for (kwargs, _), l in zip(self.column, line)} else: yield tuple(line) # No dictionary to map arguments else: yield line def save(self, data: dict): """ Print the data. Args: data (`dict`): The data to store. """ print(data) def save_binary(self, data: Union[dict, List[dict]]) -> str: if self.output_path is None: raise KeyError( "When using piped input on pipeline outputting large object requires an output file path. " "Please provide such output path through --output argument." ) return super().save_binary(data) class _ScikitCompat(ABC): """ Interface layer for the Scikit and Keras compatibility. """ @abstractmethod def transform(self, X): raise NotImplementedError() @abstractmethod def predict(self, X): raise NotImplementedError() def build_pipeline_init_args( has_tokenizer: bool = False, has_feature_extractor: bool = False, has_image_processor: bool = False, has_processor: bool = False, supports_binary_output: bool = True, ) -> str: docstring = r""" Arguments: model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.""" if has_tokenizer: docstring += r""" tokenizer ([`PreTrainedTokenizer`]): The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from [`PreTrainedTokenizer`].""" if has_feature_extractor: docstring += r""" feature_extractor ([`SequenceFeatureExtractor`]): The feature extractor that will be used by the pipeline to encode data for the model. This object inherits from [`SequenceFeatureExtractor`].""" if has_image_processor: docstring += r""" image_processor ([`BaseImageProcessor`]): The image processor that will be used by the pipeline to encode data for the model. This object inherits from [`BaseImageProcessor`].""" if has_processor: docstring += r""" processor ([`ProcessorMixin`]): The processor that will be used by the pipeline to encode data for the model. This object inherits from [`ProcessorMixin`]. Processor is a composite object that might contain `tokenizer`, `feature_extractor`, and `image_processor`.""" docstring += r""" modelcard (`str` or [`ModelCard`], *optional*): Model card attributed to the model for this pipeline. framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. task (`str`, defaults to `""`): A task-identifier for the pipeline. 
num_workers (`int`, *optional*, defaults to 8): When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the number of workers to be used. batch_size (`int`, *optional*, defaults to 1): When the pipeline will use *DataLoader* (when passing a dataset, on GPU for a Pytorch model), the size of the batch to use, for inference this is not always beneficial, please read [Batching with pipelines](https://huggingface.co/transformers/main_classes/pipelines.html#pipeline-batching) . args_parser ([`~pipelines.ArgumentHandler`], *optional*): Reference to the object in charge of parsing supplied pipeline parameters. device (`int`, *optional*, defaults to -1): Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. You can pass native `torch.device` or a `str` too torch_dtype (`str` or `torch.dtype`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model (`torch.float16`, `torch.bfloat16`, ... or `"auto"`)""" if supports_binary_output: docstring += r""" binary_output (`bool`, *optional*, defaults to `False`): Flag indicating if the output the pipeline should happen in a serialized format (i.e., pickle) or as the raw output data e.g. text.""" return docstring PIPELINE_INIT_ARGS = build_pipeline_init_args( has_tokenizer=True, has_feature_extractor=True, has_image_processor=True, has_processor=True, supports_binary_output=True, ) if is_torch_available(): from transformers.pipelines.pt_utils import ( PipelineChunkIterator, PipelineDataset, PipelineIterator, PipelinePackIterator, ) @add_end_docstrings( build_pipeline_init_args( has_tokenizer=True, has_feature_extractor=True, has_image_processor=True, has_processor=True ) ) class Pipeline(_ScikitCompat, PushToHubMixin): """ The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across different pipelines. Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following operations: Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output Pipeline supports running on CPU or GPU through the device argument (see below). Some pipeline, like for instance [`FeatureExtractionPipeline`] (`'feature-extraction'`) output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we provide the `binary_output` constructor argument. If set to `True`, the output will be stored in the pickle format. """ # Historically we have pipelines working with `tokenizer`, `feature_extractor`, and `image_processor` # as separate processing components. While we have `processor` class that combines them, some pipelines # might still operate with these components separately. # With the addition of `processor` to `pipeline`, we want to avoid: # - loading `processor` for pipelines that still work with `image_processor` and `tokenizer` separately; # - loading `image_processor`/`tokenizer` as a separate component while we operate only with `processor`, # because `processor` will load required sub-components by itself. # Below flags allow granular control over loading components and set to be backward compatible with current # pipelines logic. You may override these flags when creating your pipeline. 
For example, for # `zero-shot-object-detection` pipeline which operates with `processor` you should set `_load_processor=True` # and all the rest flags to `False` to avoid unnecessary loading of the components. _load_processor = False _load_image_processor = True _load_feature_extractor = True _load_tokenizer = True default_input_names = None def __init__( self, model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: Optional[PreTrainedTokenizer] = None, feature_extractor: Optional[PreTrainedFeatureExtractor] = None, image_processor: Optional[BaseImageProcessor] = None, processor: Optional[ProcessorMixin] = None, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, task: str = "", args_parser: ArgumentHandler = None, device: Union[int, "torch.device"] = None, torch_dtype: Optional[Union[str, "torch.dtype"]] = None, binary_output: bool = False, **kwargs, ): if framework is None: framework, model = infer_framework_load_model(model, config=model.config) self.task = task self.model = model self.tokenizer = tokenizer self.feature_extractor = feature_extractor self.image_processor = image_processor self.processor = processor self.modelcard = modelcard self.framework = framework # `accelerate` device map hf_device_map = getattr(self.model, "hf_device_map", None) if hf_device_map is not None and device is not None: raise ValueError( "The model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please " "discard the `device` argument when creating your pipeline object." ) if device is None: if hf_device_map is not None: # Take the first device used by `accelerate`. device = next(iter(hf_device_map.values())) else: device = 0 if is_torch_available() and self.framework == "pt": if device == -1 and self.model.device is not None: device = self.model.device if isinstance(device, torch.device): if device.type == "xpu" and not is_torch_xpu_available(check_device=True): raise ValueError(f'{device} is not available, you should use device="cpu" instead') self.device = device elif isinstance(device, str): if "xpu" in device and not is_torch_xpu_available(check_device=True): raise ValueError(f'{device} is not available, you should use device="cpu" instead') self.device = torch.device(device) elif device < 0: self.device = torch.device("cpu") elif is_torch_mlu_available(): self.device = torch.device(f"mlu:{device}") elif is_torch_musa_available(): self.device = torch.device(f"musa:{device}") elif is_torch_cuda_available(): self.device = torch.device(f"cuda:{device}") elif is_torch_npu_available(): self.device = torch.device(f"npu:{device}") elif is_torch_xpu_available(check_device=True): self.device = torch.device(f"xpu:{device}") elif is_torch_mps_available(): self.device = torch.device(f"mps:{device}") else: self.device = torch.device("cpu") else: self.device = device if device is not None else -1 logger.warning(f"Device set to use {self.device}") self.binary_output = binary_output # We shouldn't call `model.to()` for models loaded with accelerate as well as the case that model is already on device if ( self.framework == "pt" and self.model.device != self.device and not (isinstance(self.device, int) and self.device < 0) and hf_device_map is None ): self.model.to(self.device) # If the model can generate: # 1 - create a local generation config. This is done to avoid side-effects on the model as we apply local # tweaks to the generation config. # 2 - load the assistant model if it is passed. 
self.assistant_model, self.assistant_tokenizer = load_assistant_model( self.model, kwargs.pop("assistant_model", None), kwargs.pop("assistant_tokenizer", None) ) if self.model.can_generate(): self.prefix = self.model.config.prefix if hasattr(self.model.config, "prefix") else None self.generation_config = copy.deepcopy(self.model.generation_config) # Update the generation config with task specific params if they exist # NOTE: `prefix` is pipeline-specific and doesn't exist in the generation config. task_specific_params = self.model.config.task_specific_params if task_specific_params is not None and task in task_specific_params: this_task_params = task_specific_params.get(task) if "prefix" in this_task_params: self.prefix = this_task_params.pop("prefix") self.generation_config.update(**this_task_params) # If the tokenizer has a pad token but the model doesn't, set it so that `generate` is aware of it. if ( self.tokenizer is not None and self.tokenizer.pad_token_id is not None and self.generation_config.pad_token_id is None ): self.generation_config.pad_token_id = self.tokenizer.pad_token_id self.call_count = 0 self._batch_size = kwargs.pop("batch_size", None) self._num_workers = kwargs.pop("num_workers", None) self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs) # In processor only mode, we can get the modality processors from the processor if self.processor is not None and all( [self.tokenizer is None, self.feature_extractor is None, self.image_processor is None] ): self.tokenizer = getattr(self.processor, "tokenizer", None) self.feature_extractor = getattr(self.processor, "feature_extractor", None) self.image_processor = getattr(self.processor, "image_processor", None) if self.image_processor is None and self.feature_extractor is not None: if isinstance(self.feature_extractor, BaseImageProcessor): # Backward compatible change, if users called # ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor()) # then we should keep working self.image_processor = self.feature_extractor def save_pretrained( self, save_directory: Union[str, os.PathLike], safe_serialization: bool = True, **kwargs, ): """ Save the pipeline's model and tokenizer. Args: save_directory (`str` or `os.PathLike`): A path to the directory where to saved. It will be created if it doesn't exist. safe_serialization (`str`): Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow. kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) kwargs["token"] = use_auth_token if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if hasattr(self, "_registered_impl"): # Add info to the config pipeline_info = self._registered_impl.copy() custom_pipelines = {} for task, info in pipeline_info.items(): if info["impl"] != self.__class__: continue info = info.copy() module_name = info["impl"].__module__ last_module = module_name.split(".")[-1] # Change classes into their names/full names info["impl"] = f"{last_module}.{info['impl'].__name__}" info["pt"] = tuple(c.__name__ for c in info["pt"]) info["tf"] = tuple(c.__name__ for c in info["tf"]) custom_pipelines[task] = info self.model.config.custom_pipelines = custom_pipelines # Save the pipeline custom code custom_object_save(self, save_directory) kwargs["safe_serialization"] = safe_serialization self.model.save_pretrained(save_directory, **kwargs) if self.tokenizer is not None: self.tokenizer.save_pretrained(save_directory, **kwargs) if self.feature_extractor is not None: self.feature_extractor.save_pretrained(save_directory, **kwargs) if self.image_processor is not None: self.image_processor.save_pretrained(save_directory, **kwargs) if self.modelcard is not None: self.modelcard.save_pretrained(save_directory) def transform(self, X): """ Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). """ return self(X) def predict(self, X): """ Scikit / Keras interface to transformers' pipelines. This method will forward to __call__(). """ return self(X) @property def torch_dtype(self) -> Optional["torch.dtype"]: """ Torch dtype of the model (if it's Pytorch model), `None` otherwise. """ return getattr(self.model, "dtype", None) @contextmanager def device_placement(self): """ Context Manager allowing tensor allocation on the user-specified device in framework agnostic way. Returns: Context manager Examples: ```python # Explicitly ask for tensor allocation on CUDA device :0 pipe = pipeline(..., device=0) with pipe.device_placement(): # Every framework specific tensor allocation will be done on the request device output = pipe(...) ```""" if self.framework == "tf": with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"): yield else: if self.device.type == "cuda": with torch.cuda.device(self.device): yield elif self.device.type == "mlu": with torch.mlu.device(self.device): yield elif self.device.type == "musa": with torch.musa.device(self.device): yield elif self.device.type == "xpu": with torch.xpu.device(self.device): yield else: yield def ensure_tensor_on_device(self, **inputs): """ Ensure PyTorch tensors are on the specified device. Args: inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored): The tensors to place on `self.device`. Recursive on lists **only**. Return: `Dict[str, torch.Tensor]`: The same as `inputs` but on the proper device. 
""" return self._ensure_tensor_on_device(inputs, self.device) def _ensure_tensor_on_device(self, inputs, device): if isinstance(inputs, ModelOutput): return ModelOutput( {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} ) elif isinstance(inputs, dict): return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()} elif isinstance(inputs, UserDict): return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}) elif isinstance(inputs, list): return [self._ensure_tensor_on_device(item, device) for item in inputs] elif isinstance(inputs, tuple): return tuple([self._ensure_tensor_on_device(item, device) for item in inputs]) elif isinstance(inputs, torch.Tensor): return inputs.to(device) else: return inputs def check_model_type(self, supported_models: Union[List[str], dict]): """ Check if the model class is in supported by the pipeline. Args: supported_models (`List[str]` or `dict`): The list of models supported by the pipeline, or a dictionary with model class values. """ if not isinstance(supported_models, list): # Create from a model mapping supported_models_names = [] for _, model_name in supported_models.items(): # Mapping can now contain tuples of models for the same configuration. if isinstance(model_name, tuple): supported_models_names.extend(list(model_name)) else: supported_models_names.append(model_name) if hasattr(supported_models, "_model_mapping"): for _, model in supported_models._model_mapping._extra_content.items(): if isinstance(model_name, tuple): supported_models_names.extend([m.__name__ for m in model]) else: supported_models_names.append(model.__name__) supported_models = supported_models_names if self.model.__class__.__name__ not in supported_models: logger.error( f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are" f" {supported_models}." ) @abstractmethod def _sanitize_parameters(self, **pipeline_parameters): """ _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__` methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`, `forward` and `postprocess` methods. Do not fill dictionaries if the caller didn't specify a kwargs. This lets you keep defaults in function signatures, which is more "natural". It is not meant to be called directly, it will be automatically called and the final parameters resolved by `__init__` and `__call__` """ raise NotImplementedError("_sanitize_parameters not implemented") @abstractmethod def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]: """ Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for `_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items. """ raise NotImplementedError("preprocess not implemented") @abstractmethod def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput: """ _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess` and `postprocess` to exist, so that the hot path, this method generally can run as fast as possible. It is not meant to be called directly, `forward` is preferred. 
It is basically the same but contains additional code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part of the code (leading to faster inference). """ raise NotImplementedError("_forward not implemented") @abstractmethod def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict) -> Any: """ Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into something more friendly. Generally it will output a list or a dict or results (containing just strings and numbers). """ raise NotImplementedError("postprocess not implemented") def get_inference_context(self): return torch.no_grad def forward(self, model_inputs, **forward_params): with self.device_placement(): if self.framework == "tf": model_inputs["training"] = False model_outputs = self._forward(model_inputs, **forward_params) elif self.framework == "pt": inference_context = self.get_inference_context() with inference_context(): model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device) model_outputs = self._forward(model_inputs, **forward_params) model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu")) else: raise ValueError(f"Framework {self.framework} is not supported") return model_outputs def get_iterator( self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params ): if isinstance(inputs, collections.abc.Sized): dataset = PipelineDataset(inputs, self.preprocess, preprocess_params) else: if num_workers > 1: logger.warning( "For iterable dataset using num_workers>1 is likely to result" " in errors since everything is iterable, setting `num_workers=1`" " to guarantee correctness." ) num_workers = 1 dataset = PipelineIterator(inputs, self.preprocess, preprocess_params) if "TOKENIZERS_PARALLELISM" not in os.environ: logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") os.environ["TOKENIZERS_PARALLELISM"] = "false" # TODO hack by collating feature_extractor and image_processor feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) return final_iterator def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs): if args: logger.warning(f"Ignoring args : {args}") if num_workers is None: if self._num_workers is None: num_workers = 0 else: num_workers = self._num_workers if batch_size is None: if self._batch_size is None: batch_size = 1 else: batch_size = self._batch_size preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs) # Fuse __init__ params and __call__ params without modifying the __init__ ones. preprocess_params = {**self._preprocess_params, **preprocess_params} forward_params = {**self._forward_params, **forward_params} postprocess_params = {**self._postprocess_params, **postprocess_params} self.call_count += 1 if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda": logger.warning_once( "You seem to be using the pipelines sequentially on GPU. 
In order to maximize efficiency please use a" " dataset", ) is_dataset = Dataset is not None and isinstance(inputs, Dataset) is_generator = isinstance(inputs, types.GeneratorType) is_list = isinstance(inputs, list) is_iterable = is_dataset or is_generator or is_list # TODO make the get_iterator work also for `tf` (and `flax`). can_use_iterator = self.framework == "pt" and (is_dataset or is_generator or is_list) if is_list: if can_use_iterator: final_iterator = self.get_iterator( inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) outputs = list(final_iterator) return outputs else: return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params) elif can_use_iterator: return self.get_iterator( inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) elif is_iterable: return self.iterate(inputs, preprocess_params, forward_params, postprocess_params) elif self.framework == "pt" and isinstance(self, ChunkPipeline): return next( iter( self.get_iterator( [inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params ) ) ) else: return self.run_single(inputs, preprocess_params, forward_params, postprocess_params) def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params): return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs] def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): model_inputs = self.preprocess(inputs, **preprocess_params) model_outputs = self.forward(model_inputs, **forward_params) outputs = self.postprocess(model_outputs, **postprocess_params) return outputs def iterate(self, inputs, preprocess_params, forward_params, postprocess_params): # This function should become `get_iterator` again, this is a temporary # easy solution. for input_ in inputs: yield self.run_single(input_, preprocess_params, forward_params, postprocess_params) Pipeline.push_to_hub = copy_func(Pipeline.push_to_hub) if Pipeline.push_to_hub.__doc__ is not None: Pipeline.push_to_hub.__doc__ = Pipeline.push_to_hub.__doc__.format( object="pipe", object_class="pipeline", object_files="pipeline file" ).replace(".from_pretrained", "") class ChunkPipeline(Pipeline): def run_single(self, inputs, preprocess_params, forward_params, postprocess_params): all_outputs = [] for model_inputs in self.preprocess(inputs, **preprocess_params): model_outputs = self.forward(model_inputs, **forward_params) all_outputs.append(model_outputs) outputs = self.postprocess(all_outputs, **postprocess_params) return outputs def get_iterator( self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params ): if "TOKENIZERS_PARALLELISM" not in os.environ: logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already") os.environ["TOKENIZERS_PARALLELISM"] = "false" if num_workers > 1: logger.warning( "For ChunkPipeline using num_workers>0 is likely to result in errors since everything is iterable," " setting `num_workers=1` to guarantee correctness." 
) num_workers = 1 dataset = PipelineChunkIterator(inputs, self.preprocess, preprocess_params) # TODO hack by collating feature_extractor and image_processor feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor) dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn) model_iterator = PipelinePackIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size) final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params) return final_iterator class PipelineRegistry: def __init__(self, supported_tasks: Dict[str, Any], task_aliases: Dict[str, str]) -> None: self.supported_tasks = supported_tasks self.task_aliases = task_aliases def get_supported_tasks(self) -> List[str]: supported_task = list(self.supported_tasks.keys()) + list(self.task_aliases.keys()) supported_task.sort() return supported_task def check_task(self, task: str) -> Tuple[str, Dict, Any]: if task in self.task_aliases: task = self.task_aliases[task] if task in self.supported_tasks: targeted_task = self.supported_tasks[task] return task, targeted_task, None if task.startswith("translation"): tokens = task.split("_") if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to": targeted_task = self.supported_tasks["translation"] task = "translation" return task, targeted_task, (tokens[1], tokens[3]) raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format") raise KeyError( f"Unknown task {task}, available tasks are {self.get_supported_tasks() + ['translation_XX_to_YY']}" ) def register_pipeline( self, task: str, pipeline_class: type, pt_model: Optional[Union[type, Tuple[type]]] = None, tf_model: Optional[Union[type, Tuple[type]]] = None, default: Optional[Dict] = None, type: Optional[str] = None, ) -> None: if task in self.supported_tasks: logger.warning(f"{task} is already registered. Overwriting pipeline for task {task}...") if pt_model is None: pt_model = () elif not isinstance(pt_model, tuple): pt_model = (pt_model,) if tf_model is None: tf_model = () elif not isinstance(tf_model, tuple): tf_model = (tf_model,) task_impl = {"impl": pipeline_class, "pt": pt_model, "tf": tf_model} if default is not None: if "model" not in default and ("pt" in default or "tf" in default): default = {"model": default} task_impl["default"] = default if type is not None: task_impl["type"] = type self.supported_tasks[task] = task_impl pipeline_class._registered_impl = {task: task_impl} def to_dict(self): return self.supported_tasks
transformers/src/transformers/pipelines/base.py/0
{ "file_path": "transformers/src/transformers/pipelines/base.py", "repo_id": "transformers", "token_count": 26179 }
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import Pipeline, build_pipeline_init_args if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES logger = logging.get_logger(__name__) class ReturnType(enum.Enum): TENSORS = 0 TEXT = 1 @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) class Text2TextGenerationPipeline(Pipeline): """ Pipeline for text to text generation using seq2seq models. Example: ```python >>> from transformers import pipeline >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap") >>> generator( ... "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google" ... ) [{'generated_text': 'question: Who created the RuPERTa-base?'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about text generation parameters in [Text generation strategies](../generation_strategies) and [Text generation](text_generation). This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"text2text-generation"`. The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available parameters, see the [following documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) Usage: ```python text2text_generator = pipeline("text2text-generation") text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything") ```""" # Used in the return key of the pipeline. return_name = "generated" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) def _sanitize_parameters( self, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs, ): preprocess_params = {} if truncation is not None: preprocess_params["truncation"] = truncation forward_params = generate_kwargs postprocess_params = {} if return_tensors is not None and return_type is None: return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: postprocess_params["return_type"] = return_type if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if stop_sequence is not None: stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) if len(stop_sequence_ids) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." 
) generate_kwargs["eos_token_id"] = stop_sequence_ids[0] if self.assistant_model is not None: forward_params["assistant_model"] = self.assistant_model if self.assistant_tokenizer is not None: forward_params["tokenizer"] = self.tokenizer forward_params["assistant_tokenizer"] = self.assistant_tokenizer return preprocess_params, forward_params, postprocess_params def check_inputs(self, input_length: int, min_length: int, max_length: int): """ Checks whether there might be something wrong with given input with regard to the model. """ return True def _parse_and_tokenize(self, *args, truncation): prefix = self.prefix if self.prefix is not None else "" if isinstance(args[0], list): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input") args = ([prefix + arg for arg in args[0]],) padding = True elif isinstance(args[0], str): args = (prefix + args[0],) padding = False else: raise ValueError( f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`" ) inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__(self, *args, **kwargs): r""" Generate the output text(s) using text(s) given as inputs. Args: args (`str` or `List[str]`): Input text for the encoder. return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`): The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE` (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's max_length instead of throwing an error down the line. generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./text_generation)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **generated_text** (`str`, present when `return_text=True`) -- The generated text. - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the generated text. 
""" result = super().__call__(*args, **kwargs) if ( isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]) and all(len(res) == 1 for res in result) ): return [res[0] for res in result] return result def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs): inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs) return inputs def _forward(self, model_inputs, **generate_kwargs): if self.framework == "pt": in_b, input_length = model_inputs["input_ids"].shape elif self.framework == "tf": in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy() self.check_inputs( input_length, generate_kwargs.get("min_length", self.generation_config.min_length), generate_kwargs.get("max_length", self.generation_config.max_length), ) # User-defined `generation_config` passed to the pipeline call take precedence if "generation_config" not in generate_kwargs: generate_kwargs["generation_config"] = self.generation_config output_ids = self.model.generate(**model_inputs, **generate_kwargs) out_b = output_ids.shape[0] if self.framework == "pt": output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:]) elif self.framework == "tf": output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:])) return {"output_ids": output_ids} def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False): records = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: record = {f"{self.return_name}_token_ids": output_ids} elif return_type == ReturnType.TEXT: record = { f"{self.return_name}_text": self.tokenizer.decode( output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) } records.append(record) return records @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) class SummarizationPipeline(Text2TextGenerationPipeline): """ Summarize news articles and other documents. This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"summarization"`. The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, '*bart-large-cnn*', '*google-t5/t5-small*', '*google-t5/t5-base*', '*google-t5/t5-large*', '*google-t5/t5-3b*', '*google-t5/t5-11b*'. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list of available parameters, see the [following documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) Usage: ```python # use bart in pytorch summarizer = pipeline("summarization") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) # use t5 in tf summarizer = pipeline("summarization", model="google-t5/t5-base", tokenizer="google-t5/t5-base", framework="tf") summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20) ```""" # Used in the return key of the pipeline. return_name = "summary" def __call__(self, *args, **kwargs): r""" Summarize the text(s) given as inputs. Args: documents (*str* or `List[str]`): One or several articles (or one list of articles) to summarize. 
return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./text_generation)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input. - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the summary. """ return super().__call__(*args, **kwargs) def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool: """ Checks whether there might be something wrong with given input with regard to the model. """ if max_length < min_length: logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.") if input_length < max_length: logger.warning( f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is " "a summarization task, where outputs shorter than the input are typically wanted, you might " f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" ) @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) class TranslationPipeline(Text2TextGenerationPipeline): """ Translates from one language to another. This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"translation_xx_to_yy"`. The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation). For a list of available parameters, see the [following documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate) Usage: ```python en_fr_translator = pipeline("translation_en_to_fr") en_fr_translator("How old are you?") ```""" # Used in the return key of the pipeline. return_name = "translation" def check_inputs(self, input_length: int, min_length: int, max_length: int): if input_length > 0.9 * max_length: logger.warning( f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider " "increasing your max_length manually, e.g. 
translator('...', max_length=400)" ) return True def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None): if getattr(self.tokenizer, "_build_translation_inputs", None): return self.tokenizer._build_translation_inputs( *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang ) else: return super()._parse_and_tokenize(*args, truncation=truncation) def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs): preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs) if src_lang is not None: preprocess_params["src_lang"] = src_lang if tgt_lang is not None: preprocess_params["tgt_lang"] = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. task = kwargs.get("task", self.task) items = task.split("_") if task and len(items) == 4: # translation, XX, to YY preprocess_params["src_lang"] = items[1] preprocess_params["tgt_lang"] = items[3] return preprocess_params, forward_params, postprocess_params def __call__(self, *args, **kwargs): r""" Translate the text(s) given as inputs. Args: args (`str` or `List[str]`): Texts to be translated. return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. src_lang (`str`, *optional*): The language of the input. Might be required for multilingual models. Will not have any effect for single pair translation models tgt_lang (`str`, *optional*): The language of the desired output. Might be required for multilingual models. Will not have any effect for single pair translation models generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./text_generation)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **translation_text** (`str`, present when `return_text=True`) -- The translation. - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the translation. """ return super().__call__(*args, **kwargs)
transformers/src/transformers/pipelines/text2text_generation.py/0
{ "file_path": "transformers/src/transformers/pipelines/text2text_generation.py", "repo_id": "transformers", "token_count": 7128 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from typing import TYPE_CHECKING, Optional from packaging import version from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..integrations import replace_with_aqlm_linear from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available, logging from ..utils.quantization_config import QuantizationConfigMixin if is_torch_available(): import torch logger = logging.get_logger(__name__) class AqlmHfQuantizer(HfQuantizer): """ Quantizer of the AQLM method. Enables the loading of prequantized models. """ requires_calibration = True required_packages = ["aqlm"] optimum_quantizer = None def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`") if not is_aqlm_available(): raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`") def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: if torch.cuda.is_available(): torch_dtype = torch.float16 logger.info( "CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually." ) else: torch_dtype = torch.float32 logger.info( "CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `torch_dtype` manually." ) return torch_dtype def _process_model_before_weight_loading( self, model: "PreTrainedModel", **kwargs, ): replace_with_aqlm_linear( model, quantization_config=self.quantization_config, linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize, ) model.config.quantization_config = self.quantization_config def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model @property def is_trainable(self, model: Optional["PreTrainedModel"] = None): aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2") if aqlm_supports_training: return True else: logger.warning( f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`" ) return False def is_serializable(self, safe_serialization=None): return True
transformers/src/transformers/quantizers/quantizer_aqlm.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_aqlm.py", "repo_id": "transformers", "token_count": 1420 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass, field from typing import Optional, Tuple from .training_args import TrainingArguments from .utils import cached_property, is_tf_available, logging, requires_backends logger = logging.get_logger(__name__) if is_tf_available(): import tensorflow as tf from .modeling_tf_utils import keras @dataclass class TFTrainingArguments(TrainingArguments): """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. Using [`HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: output_dir (`str`): The output directory where the model predictions and checkpoints will be written. overwrite_output_dir (`bool`, *optional*, defaults to `False`): If `True`, overwrite the content of the output directory. Use this to continue training if `output_dir` points to a checkpoint directory. do_train (`bool`, *optional*, defaults to `False`): Whether to run training or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_eval (`bool`, *optional*): Whether to run evaluation on the validation set or not. Will be set to `True` if `eval_strategy` is different from `"no"`. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. do_predict (`bool`, *optional*, defaults to `False`): Whether to run predictions on the test set or not. This argument is not directly used by [`Trainer`], it's intended to be used by your training/evaluation scripts instead. See the [example scripts](https://github.com/huggingface/transformers/tree/main/examples) for more details. eval_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`): The evaluation strategy to adopt during training. Possible values are: - `"no"`: No evaluation is done during training. - `"steps"`: Evaluation is done (and logged) every `eval_steps`. - `"epoch"`: Evaluation is done at the end of each epoch. per_device_train_batch_size (`int`, *optional*, defaults to 8): The batch size per GPU/TPU core/CPU for training. per_device_eval_batch_size (`int`, *optional*, defaults to 8): The batch size per GPU/TPU core/CPU for evaluation. gradient_accumulation_steps (`int`, *optional*, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. <Tip warning={true}> When using gradient accumulation, one step is counted as one step with backward pass. 
Therefore, logging, evaluation, save will be conducted every `gradient_accumulation_steps * xxx_step` training examples. </Tip> learning_rate (`float`, *optional*, defaults to 5e-5): The initial learning rate for Adam. weight_decay (`float`, *optional*, defaults to 0): The weight decay to apply (if not zero). adam_beta1 (`float`, *optional*, defaults to 0.9): The beta1 hyperparameter for the Adam optimizer. adam_beta2 (`float`, *optional*, defaults to 0.999): The beta2 hyperparameter for the Adam optimizer. adam_epsilon (`float`, *optional*, defaults to 1e-8): The epsilon hyperparameter for the Adam optimizer. max_grad_norm (`float`, *optional*, defaults to 1.0): Maximum gradient norm (for gradient clipping). num_train_epochs(`float`, *optional*, defaults to 3.0): Total number of training epochs to perform. max_steps (`int`, *optional*, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides `num_train_epochs`. For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until `max_steps` is reached. warmup_ratio (`float`, *optional*, defaults to 0.0): Ratio of total training steps used for a linear warmup from 0 to `learning_rate`. warmup_steps (`int`, *optional*, defaults to 0): Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. logging_dir (`str`, *optional*): [TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to *runs/**CURRENT_DATETIME_HOSTNAME***. logging_strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`): The logging strategy to adopt during training. Possible values are: - `"no"`: No logging is done during training. - `"epoch"`: Logging is done at the end of each epoch. - `"steps"`: Logging is done every `logging_steps`. logging_first_step (`bool`, *optional*, defaults to `False`): Whether to log and evaluate the first `global_step` or not. logging_steps (`int`, *optional*, defaults to 500): Number of update steps between two logs if `logging_strategy="steps"`. save_strategy (`str` or [`~trainer_utils.SaveStrategy`], *optional*, defaults to `"steps"`): The checkpoint save strategy to adopt during training. Possible values are: - `"no"`: No save is done during training. - `"epoch"`: Save is done at the end of each epoch. - `"steps"`: Save is done every `save_steps`. save_steps (`int`, *optional*, defaults to 500): Number of updates steps before two checkpoint saves if `save_strategy="steps"`. save_total_limit (`int`, *optional*): If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in `output_dir`. no_cuda (`bool`, *optional*, defaults to `False`): Whether to not use CUDA even when it is available or not. seed (`int`, *optional*, defaults to 42): Random seed that will be set at the beginning of training. fp16 (`bool`, *optional*, defaults to `False`): Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training. fp16_opt_level (`str`, *optional*, defaults to 'O1'): For `fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on the [Apex documentation](https://nvidia.github.io/apex/amp). local_rank (`int`, *optional*, defaults to -1): During distributed training, the rank of the process. tpu_num_cores (`int`, *optional*): When training on TPU, the number of TPU cores (automatically passed by launcher script). 
debug (`bool`, *optional*, defaults to `False`): Whether to activate the trace to record computation graphs and profiling information or not. dataloader_drop_last (`bool`, *optional*, defaults to `False`): Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not. eval_steps (`int`, *optional*, defaults to 1000): Number of update steps before two evaluations. past_index (`int`, *optional*, defaults to -1): Some models like [TransformerXL](../model_doc/transformerxl) or :doc*XLNet <../model_doc/xlnet>* can make use of the past hidden states for their predictions. If this argument is set to a positive int, the `Trainer` will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument `mems`. tpu_name (`str`, *optional*): The name of the TPU the process is running on. tpu_zone (`str`, *optional*): The zone of the TPU the process is running on. If not specified, we will attempt to automatically detect from metadata. gcp_project (`str`, *optional*): Google Cloud Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect from metadata. run_name (`str`, *optional*): A descriptor for the run. Notably used for wandb, mlflow and comet logging. xla (`bool`, *optional*): Whether to activate the XLA compilation or not. """ framework = "tf" tpu_name: Optional[str] = field( default=None, metadata={"help": "Name of TPU"}, ) tpu_zone: Optional[str] = field( default=None, metadata={"help": "Zone of TPU"}, ) gcp_project: Optional[str] = field( default=None, metadata={"help": "Name of Cloud TPU-enabled project"}, ) poly_power: float = field( default=1.0, metadata={"help": "Power for the Polynomial decay LR scheduler."}, ) xla: bool = field(default=False, metadata={"help": "Whether to activate the XLA compilation or not"}) @cached_property def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", int]: requires_backends(self, ["tf"]) logger.info("Tensorflow: setting up strategy") gpus = tf.config.list_physical_devices("GPU") # Set to float16 at first if self.fp16: keras.mixed_precision.set_global_policy("mixed_float16") if self.no_cuda: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") else: try: if self.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver( self.tpu_name, zone=self.tpu_zone, project=self.gcp_project ) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: if self.tpu_name: raise RuntimeError(f"Couldn't connect to TPU {self.tpu_name}!") else: tpu = None if tpu: # Set to bfloat16 in case of TPU if self.fp16: keras.mixed_precision.set_global_policy("mixed_bfloat16") tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.TPUStrategy(tpu) elif len(gpus) == 0: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") elif len(gpus) == 1: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") elif len(gpus) > 1: # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` strategy = tf.distribute.MirroredStrategy() else: raise ValueError("Cannot find the proper strategy, please check your environment properties.") return strategy @property def strategy(self) -> "tf.distribute.Strategy": """ The strategy used for distributed training. 
""" requires_backends(self, ["tf"]) return self._setup_strategy @property def n_replicas(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. """ requires_backends(self, ["tf"]) return self._setup_strategy.num_replicas_in_sync @property def should_log(self): """ Whether or not the current process should produce log. """ return False # TF Logging is handled by Keras not the Trainer @property def train_batch_size(self) -> int: """ The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training). """ if self.per_gpu_train_batch_size: logger.warning( "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " "version. Using `--per_device_train_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size return per_device_batch_size * self.n_replicas @property def eval_batch_size(self) -> int: """ The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training). """ if self.per_gpu_eval_batch_size: logger.warning( "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " "version. Using `--per_device_eval_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size return per_device_batch_size * self.n_replicas @property def n_gpu(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. """ requires_backends(self, ["tf"]) warnings.warn( "The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.", FutureWarning, ) return self._setup_strategy.num_replicas_in_sync
transformers/src/transformers/training_args_tf.py/0
{ "file_path": "transformers/src/transformers/training_args_tf.py", "repo_id": "transformers", "token_count": 5790 }
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class ASTFeatureExtractor(metaclass=DummyObject): _backends = ["speech"] def __init__(self, *args, **kwargs): requires_backends(self, ["speech"]) class Speech2TextFeatureExtractor(metaclass=DummyObject): _backends = ["speech"] def __init__(self, *args, **kwargs): requires_backends(self, ["speech"])
transformers/src/transformers/utils/dummy_speech_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_speech_objects.py", "repo_id": "transformers", "token_count": 166 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os from typing import Dict, Optional, Union from packaging import version from .hub import cached_file from .import_utils import is_peft_available ADAPTER_CONFIG_NAME = "adapter_config.json" ADAPTER_WEIGHTS_NAME = "adapter_model.bin" ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors" def find_adapter_config_file( model_id: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: Optional[bool] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", _commit_hash: Optional[str] = None, ) -> Optional[str]: r""" Simply checks if the model stored on the Hub or locally is an adapter model or not, return the path of the adapter config file if it is, None otherwise. Args: model_id (`str`): The identifier of the model to look for, can be either a local path or an id to the repository on the Hub. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>". </Tip> local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. 
""" adapter_cached_filename = None if model_id is None: return None elif os.path.isdir(model_id): list_remote_files = os.listdir(model_id) if ADAPTER_CONFIG_NAME in list_remote_files: adapter_cached_filename = os.path.join(model_id, ADAPTER_CONFIG_NAME) else: adapter_cached_filename = cached_file( model_id, ADAPTER_CONFIG_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, _commit_hash=_commit_hash, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) return adapter_cached_filename def check_peft_version(min_version: str) -> None: r""" Checks if the version of PEFT is compatible. Args: version (`str`): The version of PEFT to check against. """ if not is_peft_available(): raise ValueError("PEFT is not installed. Please install it with `pip install peft`") is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version) if not is_peft_version_compatible: raise ValueError( f"The version of PEFT you are using is not compatible, please use a version that is greater" f" than {min_version}" )
transformers/src/transformers/utils/peft_utils.py/0
{ "file_path": "transformers/src/transformers/utils/peft_utils.py", "repo_id": "transformers", "token_count": 1977 }
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import random import tempfile import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin global_rng = random.Random() if is_torch_available(): import torch # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class ASTFeatureExtractionTester: def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ASTFeatureExtractor def setUp(self): self.feat_extract_tester = ASTFeatureExtractionTester(self) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test not batched input encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values 
encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) @require_torch def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def test_integration(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = ASTFeatureExtractor() input_values = feature_extractor(input_speech, return_tensors="pt").input_values self.assertEqual(input_values.shape, (1, 1024, 128)) torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertDictEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = 
feat_extract_second.to_dict() self.assertEqual(dict_first, dict_second) # exact same tests than before, except that we simulate that torchaudio is not available @require_torch @unittest.mock.patch( "transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer.is_speech_available", lambda: False, ) class ASTFeatureExtractionWithoutTorchaudioTest(ASTFeatureExtractionTest): def test_using_audio_utils(self): # Tests that it uses audio_utils instead of torchaudio feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) self.assertTrue(hasattr(feat_extract, "window")) self.assertTrue(hasattr(feat_extract, "mel_filters")) from transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer import ( is_speech_available, ) self.assertFalse(is_speech_available())
transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py/0
{ "file_path": "transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py", "repo_id": "transformers", "token_count": 3868 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class TFBlenderbotSmallModelTester: config_cls = BlenderbotSmallConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBlenderbotSmallModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = 
input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_blenderbot_small_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = ( { "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) is_encoder_decoder = True test_pruning = False test_onnx = False def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return pipeline_test_case_name == "TextGenerationPipelineTests" def setUp(self): self.model_tester = TFBlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) def test_config(self): 
self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) @require_tokenizers @require_tf class TFBlenderbot90MIntegrationTests(unittest.TestCase): src_text = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] model_name = "facebook/blenderbot_small-90M" @cached_property def tokenizer(self): # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M") @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model @slow def test_90_generation_from_long_input(self): model_inputs = self.tokenizer(self.src_text, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
transformers/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py/0
{ "file_path": "transformers/tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py", "repo_id": "transformers", "token_count": 4303 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChameleonImageProcessor class ChameleonImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=200, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[1.0, 1.0, 1.0], image_std=[1.0, 1.0, 1.0], do_convert_rgb=True, ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.expected_output_image_shape def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ChameleonImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ChameleonImageProcessor if is_vision_available() else None # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->Chameleon def setUp(self): super().setUp() self.image_processor_tester = ChameleonImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = 
self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_nested_input(self): image_processing = self.image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test 
batched as a list of images encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = [image_inputs[:3], image_inputs[3:]] encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 18, 18) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) # Image processor should return same pixel values, independently of input format self.assertTrue((encoded_images_nested == encoded_images).all())
transformers/tests/models/chameleon/test_image_processing_chameleon.py/0
{ "file_path": "transformers/tests/models/chameleon/test_image_processing_chameleon.py", "repo_id": "transformers", "token_count": 3562 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class CLIPProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = CLIPProcessor def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) image_processor_map = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), 
tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer) self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor) self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor) def test_save_load_pretrained_additional_features(self): processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = CLIPProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
transformers/tests/models/clip/test_processor_clip.py/0
{ "file_path": "transformers/tests/models/clip/test_processor_clip.py", "repo_id": "transformers", "token_count": 3176 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Cohere model.""" import unittest from transformers import CohereConfig, is_torch_available from transformers.testing_utils import ( require_bitsandbytes, require_torch, require_torch_multi_gpu, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, CohereForCausalLM, CohereModel # Copied from transformers.tests.models.llama.LlamaModelTester with Llama->Cohere class CohereModelTester: config_class = CohereConfig if is_torch_available(): model_class = CohereModel for_causal_lm_class = CohereForCausalLM def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels # Ignore copy def get_config(self): return self.config_class( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = self.model_class(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = self.model_class(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = self.for_causal_lm_class(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = self.for_causal_lm_class(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] 
# select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CohereModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CohereModel, CohereForCausalLM) if is_torch_available() else () all_generative_model_classes = (CohereForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": CohereModel, "text-generation": CohereForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] def setUp(self): self.model_tester = CohereModelTester(self) self.config_tester = ConfigTester(self, config_class=CohereConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_torch_fx_output_loss(self): super().test_torch_fx_output_loss() @require_torch @slow class CohereIntegrationTest(unittest.TestCase): @require_torch_multi_gpu @require_bitsandbytes def test_batched_4bit(self): model_id = "CohereForAI/c4ai-command-r-v01-4bit" EXPECTED_TEXT = [ 'Hello today I am going to show you how to make a simple and easy card using the new stamp set called "Hello" from the Occasions catalog. This set is so versatile and can be used for many occasions. I used the new In', "Hi there, here we are again with another great collection of free fonts for your next project. This time we have gathered 10 free fonts that you can download and use in your designs. These fonts are perfect for any kind", ] model = CohereForCausalLM.from_pretrained(model_id, device_map="auto") tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token text = ["Hello today I am going to show you how to", "Hi there, here we are"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=40, do_sample=False) self.assertEqual(tokenizer.batch_decode(output, skip_special_tokens=True), EXPECTED_TEXT) @require_torch_sdpa def test_batched_small_model_logits(self): # Since the model is very large, we created a random cohere model so that we can do a simple # logits check on it. 
model_id = "hf-internal-testing/cohere-random" EXPECTED_LOGITS = torch.Tensor( [ [[0.0000, 0.1866, -0.1997], [0.0000, -0.0736, 0.1785], [0.0000, -0.1965, -0.0569]], [[0.0000, -0.0302, 0.1488], [0.0000, -0.0402, 0.1351], [0.0000, -0.0341, 0.1116]], ] ).to(device=torch_device, dtype=torch.float16) tokenizer = AutoTokenizer.from_pretrained(model_id) model = CohereForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16).to( torch_device ) tokenizer.pad_token = tokenizer.eos_token text = ["Hello today I am going to show you how to", "Hi there, here we are"] inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch_device) with torch.no_grad(): output = model(**inputs) logits = output.logits torch.testing.assert_close(EXPECTED_LOGITS, logits[:, :3, :3], rtol=1e-3, atol=1e-3)
transformers/tests/models/cohere/test_modeling_cohere.py/0
{ "file_path": "transformers/tests/models/cohere/test_modeling_cohere.py", "repo_id": "transformers", "token_count": 6157 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow ConvNext model.""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import ConvNextConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFConvNextForImageClassification, TFConvNextModel if is_vision_available(): from PIL import Image from transformers import ConvNextImageProcessor class TFConvNextModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ConvNextConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = TFConvNextModel(config=config) result = model(pixel_values, training=False) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFConvNextForImageClassification(config) result = model(pixel_values, labels=labels, training=False) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict 
= {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFConvNextModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFConvNextModel, TFConvNextForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFConvNextModel, "image-classification": TFConvNextForImageClassification} if is_tf_available() else {} ) test_pruning = False test_onnx = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = TFConvNextModelTester(self) self.config_tester = ConfigTester( self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37, ) @unittest.skip(reason="ConvNext does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) @slow def test_keras_fit(self): super().test_keras_fit() @unittest.skip(reason="ConvNext does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Since ConvNext does not have any attention we need to rewrite this test. 
def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224") self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFConvNextModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.0260, -0.4739, 0.1911]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
transformers/tests/models/convnext/test_modeling_tf_convnext.py/0
{ "file_path": "transformers/tests/models/convnext/test_modeling_tf_convnext.py", "repo_id": "transformers", "token_count": 4942 }
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import EfficientNetImageProcessor class EfficientNetImageProcessorTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class EfficientNetImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = EfficientNetImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = EfficientNetImageProcessorTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_rescale(self): # EfficientNet optionally rescales between -1 and 1 instead of the usual 0 and 1 image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32) image_processor = 
self.image_processing_class(**self.image_processor_dict) rescaled_image = image_processor.rescale(image, scale=1 / 127.5) expected_image = (image * (1 / 127.5)).astype(np.float32) - 1 self.assertTrue(np.allclose(rescaled_image, expected_image)) rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False) expected_image = (image / 255.0).astype(np.float32) self.assertTrue(np.allclose(rescaled_image, expected_image))
transformers/tests/models/efficientnet/test_image_processing_efficientnet.py/0
{ "file_path": "transformers/tests/models/efficientnet/test_image_processing_efficientnet.py", "repo_id": "transformers", "token_count": 1828 }
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import FlaubertConfig, is_sacremoses_available, is_torch_available from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import create_sinusoidal_embeddings class FlaubertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: 
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, 
model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = FlaubertForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = FlaubertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() and is_sacremoses_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, 
tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if ( pipeline_test_case_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False # Flaubert has 2 QA models -> need to manually set the correct labels for one of them here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) # Copied from tests/models/distilbert/test_modeling_distilbert.py with Distilbert->Flaubert def test_flaubert_model_with_sinusoidal_encodings(self): config = FlaubertConfig(sinusoidal_embeddings=True) model = FlaubertModel(config=config) sinusoidal_pos_embds = torch.empty((config.max_position_embeddings, config.emb_dim), dtype=torch.float32) create_sinusoidal_embeddings(config.max_position_embeddings, config.emb_dim, sinusoidal_pos_embds) self.model_tester.parent.assertTrue(torch.equal(model.position_embeddings.weight, sinusoidal_pos_embds)) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_flaubert_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs) def test_flaubert_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "flaubert/flaubert_small_cased" model = FlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: self.skipTest(reason="FlauBertForMultipleChoice behaves incorrectly in JIT environments.") config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt")) loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) @require_torch class FlaubertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/flaubert/test_modeling_flaubert.py/0
{ "file_path": "transformers/tests/models/flaubert/test_modeling_flaubert.py", "repo_id": "transformers", "token_count": 8915 }
# coding=utf-8 # Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import FunnelConfig, FunnelTokenizer, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) class FunnelModelTester: """You can also import this e.g, from .test_modeling_funnel import FunnelModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, # Set to a smaller value, so we can keep the small error threshold (1e-5) in the test num_labels=3, num_choices=4, scope=None, base=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.block_sizes = block_sizes self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = 2 self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.initializer_std = initializer_std # Used in the tests to check the size of the first attention layer self.num_attention_heads = n_head # Used in the tests to check the size of the first hidden state self.hidden_size = self.d_model # Used in the tests to check the number of output hidden states/attentions self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: self.expected_num_hidden_layers = self.num_hidden_layers + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1) config = self.get_config() return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) def get_config(self): return FunnelConfig( vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) def create_and_check_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelBaseModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) model.config.truncate_seq = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model)) model.config.separate_cls = False result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForPreTraining(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, 
self.seq_length)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_choices = self.num_choices model = FunnelForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): config.num_labels = self.num_labels model = FunnelForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ): model = FunnelForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, fake_token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FunnelModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( ( FunnelModel, FunnelForMaskedLM, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { 
"feature-extraction": (FunnelBaseModel, FunnelModel), "fill-mask": FunnelForMaskedLM, "question-answering": FunnelForQuestionAnswering, "text-classification": FunnelForSequenceClassification, "token-classification": FunnelForTokenClassification, "zero-shot": FunnelForSequenceClassification, } if is_torch_available() else {} ) # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch class FunnelBaseModelTest(ModelTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False all_model_classes = ( (FunnelBaseModel, FunnelForMultipleChoice, FunnelForSequenceClassification) if is_torch_available() else () ) def setUp(self): self.model_tester = FunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) # overwrite from test_modeling_common def test_training(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ == "FunnelBaseModel": continue model = model_class(config) 
model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]: if hasattr(module, param) and getattr(module, param) is not None: weight = getattr(module, param) weight.data.fill_(3) @require_torch @require_sentencepiece @require_tokenizers class FunnelModelIntegrationTest(unittest.TestCase): def test_inference_tiny_model(self): batch_size = 13 sequence_length = 7 input_ids = torch.arange(0, batch_size * sequence_length).long().reshape(batch_size, sequence_length) lengths = [0, 1, 2, 3, 4, 5, 6, 4, 1, 3, 5, 0, 1] token_type_ids = torch.tensor([[2] + [0] * a + [1] * (sequence_length - a - 1) for a in lengths]) model = FunnelModel.from_pretrained("sgugger/funnel-random-tiny") output = model(input_ids, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2344.8352) expected_output_mean = torch.tensor(0.8052) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4) attention_mask = torch.tensor([[1] * 7, [1] * 4 + [0] * 3] * 6 + [[0, 1, 1, 0, 0, 1, 1]]) output = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0].abs() expected_output_sum = torch.tensor(2343.8425) expected_output_mean = torch.tensor(0.8049) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4) @slow def test_inference_model(self): tokenizer = FunnelTokenizer.from_pretrained("huggingface/funnel-small") model = FunnelModel.from_pretrained("huggingface/funnel-small") inputs = tokenizer("Hello! I am the Funnel Transformer model.", return_tensors="pt") output = model(**inputs)[0] expected_output_sum = torch.tensor(235.7246) expected_output_mean = torch.tensor(0.0256) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4)
transformers/tests/models/funnel/test_modeling_funnel.py/0
{ "file_path": "transformers/tests/models/funnel/test_modeling_funnel.py", "repo_id": "transformers", "token_count": 9095 }
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from parameterized import parameterized from transformers import GPTBigCodeConfig, is_torch_available from transformers.testing_utils import cleanup, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPT2TokenizerFast, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, ) from transformers.models.gpt_bigcode.modeling_gpt_bigcode import GPTBigCodeAttention class GPTBigCodeModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, multi_query=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 2 self.pad_token_id = vocab_size - 3 self.multi_query = multi_query def get_large_model_config(self): return GPTBigCodeConfig.from_pretrained("bigcode/gpt_bigcode-santacoder") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = 
ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return GPTBigCodeConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, attention_softmax_in_fp32=False, scale_attention_softmax_in_fp32=False, multi_query=self.multi_query, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_gpt_bigcode_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_gpt_bigcode_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] # select random slice 
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_bigcode_model_attention_mask_past( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_bigcode_model_past_large_inputs( self, config, input_ids, input_mask, head_mask, token_type_ids, *args ): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, 
random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPTBigCodeForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False ): model = GPTBigCodeForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_gpt_bigcode_for_sequence_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTBigCodeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_gpt_bigcode_for_token_classification( self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTBigCodeForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_gpt_bigcode_weight_initialization(self, config, *args): model = GPTBigCodeModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class GPTBigCodeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): # TODO: Update the tests to use valid pretrained models. 
all_model_classes = ( ( GPTBigCodeModel, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (GPTBigCodeForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GPTBigCodeModel, "text-classification": GPTBigCodeForSequenceClassification, "text-generation": GPTBigCodeForCausalLM, "token-classification": GPTBigCodeForTokenClassification, "zero-shot": GPTBigCodeForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = False test_missing_keys = False test_pruning = False test_torchscript = False multi_query = True # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) return inputs_dict def setUp(self): self.model_tester = GPTBigCodeModelTester(self, multi_query=self.multi_query) self.config_tester = ConfigTester(self, config_class=GPTBigCodeConfig, n_embd=37) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MQA models does not support retain_grad") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Contrastive search not supported due to non-standard caching mechanism") def test_contrastive_generate(self): pass @unittest.skip(reason="Contrastive search not supported due to non-standard caching mechanism") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="CPU offload seems to be broken for some reason - tiny models keep hitting corner cases") def test_cpu_offload(self): pass @unittest.skip(reason="Disk offload seems to be broken for some reason - tiny models keep hitting corner cases") def test_disk_offload(self): pass @unittest.skip(reason="BigCodeGPT has a non-standard KV cache format.") def test_past_key_values_format(self): pass @unittest.skip(reason="BigCodeGPT has a non-standard KV cache format and breaks this test.") def test_generate_continue_from_inputs_embeds(self): pass def test_gpt_bigcode_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model(*config_and_inputs) def test_gpt_bigcode_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model_past(*config_and_inputs) def test_gpt_bigcode_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model_attention_mask_past(*config_and_inputs) def test_gpt_bigcode_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model_past_large_inputs(*config_and_inputs) def test_gpt_bigcode_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_gpt_bigcode_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_for_sequence_classification(*config_and_inputs) def test_gpt_bigcode_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_gpt_bigcode_for_token_classification(*config_and_inputs) def test_gpt_bigcode_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_gpt_bigcode_scale_attn_by_inverse_layer_idx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(scale_attn_by_inverse_layer_idx=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt_bigcode_reorder_and_upcast_attn(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(reorder_and_upcast_attn=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt_bigcode_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_weight_initialization(*config_and_inputs) @require_torch class GPTBigCodeMHAModelTest(GPTBigCodeModelTest): # `parameterized_class` breaks with mixins, so we use inheritance instead multi_query = False @slow @require_torch class GPTBigCodeModelLanguageGenerationTest(unittest.TestCase): def test_generate_simple(self): model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder").to(torch_device) tokenizer = GPT2TokenizerFast.from_pretrained("bigcode/gpt_bigcode-santacoder") input_ids = tokenizer("def print_hello_world():", return_tensors="pt").input_ids.to(torch_device) output_sequence = model.generate(input_ids) output_sentence = tokenizer.decode(output_sequence[0], skip_special_tokens=True) expected_output = """def print_hello_world():\n print("Hello World!")\n\n\ndef print_hello_""" self.assertEqual(output_sentence, expected_output) def test_generate_batched(self): tokenizer = GPT2TokenizerFast.from_pretrained("bigcode/gpt_bigcode-santacoder") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder").to(torch_device) inputs = tokenizer(["def print_hello_world():", "def say_hello():"], return_tensors="pt", padding=True).to( torch_device ) outputs = model.generate(**inputs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) expected_output = [ 'def print_hello_world():\n print("Hello World!")\n\n\ndef print_hello_', 'def say_hello():\n print("Hello, World!")\n\n\nsay_hello()', ] self.assertListEqual(outputs, expected_output) @require_torch class GPTBigCodeMQATest(unittest.TestCase): def get_attention(self, multi_query): config = GPTBigCodeConfig.from_pretrained( "bigcode/gpt_bigcode-santacoder", multi_query=multi_query, attn_pdrop=0, resid_pdrop=0, ) return GPTBigCodeAttention(config) @parameterized.expand([(seed, is_train_mode) for seed in range(5) for is_train_mode in [True, False]]) def test_mqa_reduces_to_mha(self, seed, is_train_mode=True): torch.manual_seed(seed) # CREATE MQA AND MHA ATTENTIONS attention_mqa = self.get_attention(True) attention_mha = self.get_attention(False) # ENFORCE MATCHING WEIGHTS num_heads = attention_mqa.num_heads embed_dim = attention_mqa.embed_dim head_dim = attention_mqa.head_dim with torch.no_grad(): mqa_q_weight = attention_mqa.c_attn.weight[:embed_dim, :].view(num_heads, 1, head_dim, embed_dim) mqa_kv_weight = attention_mqa.c_attn.weight[embed_dim:, :].view(1, 2, head_dim, embed_dim) mha_c_weight = torch.cat( [mqa_q_weight, mqa_kv_weight.expand(num_heads, 2, head_dim, embed_dim)], dim=1 ).view(3 * 
num_heads * head_dim, embed_dim) mqa_q_bias = attention_mqa.c_attn.bias[:embed_dim].view(num_heads, 1, head_dim) mqa_kv_bias = attention_mqa.c_attn.bias[embed_dim:].view(1, 2, head_dim) mha_c_bias = torch.cat([mqa_q_bias, mqa_kv_bias.expand(num_heads, 2, head_dim)], dim=1).view( 3 * num_heads * head_dim ) attention_mha.c_attn.weight.copy_(mha_c_weight) attention_mha.c_attn.bias.copy_(mha_c_bias) attention_mha.c_proj.weight.copy_(attention_mqa.c_proj.weight) attention_mha.c_proj.bias.copy_(attention_mqa.c_proj.bias) # PUT THE MODEL INTO THE CORRECT MODE attention_mha.train(is_train_mode) attention_mqa.train(is_train_mode) # RUN AN INPUT THROUGH THE MODELS num_tokens = 5 hidden_states = torch.randn(1, num_tokens, embed_dim) attention_mha_result = attention_mha(hidden_states)[0] attention_mqa_result = attention_mqa(hidden_states)[0] # CHECK THAT ALL OUTPUTS ARE THE SAME torch.testing.assert_close(attention_mha_result, attention_mqa_result, rtol=1e-5, atol=1e-5)
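# A minimal sketch (not part of the test file above) of the idea behind `test_mqa_reduces_to_mha`:
# multi-query attention shares a single key/value head across all query heads, so tiling that shared
# head per query head must reproduce ordinary multi-head attention. Shapes and the helper name are
# illustrative, not the GPTBigCodeAttention implementation.
import torch

def attention(q, k, v):
    # q: (num_heads, seq, head_dim); k and v broadcast over the head dimension
    scores = q @ k.transpose(-1, -2) / q.shape[-1] ** 0.5
    return torch.softmax(scores, dim=-1) @ v

num_heads, seq, head_dim = 4, 5, 8
q = torch.randn(num_heads, seq, head_dim)
k_shared = torch.randn(1, seq, head_dim)  # one key head shared by every query head (MQA)
v_shared = torch.randn(1, seq, head_dim)  # one value head shared by every query head (MQA)

out_mqa = attention(q, k_shared, v_shared)  # shared K/V, broadcast across query heads
out_mha = attention(q, k_shared.repeat(num_heads, 1, 1), v_shared.repeat(num_heads, 1, 1))  # tiled K/V
torch.testing.assert_close(out_mqa, out_mha)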
transformers/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py/0
{ "file_path": "transformers/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py", "repo_id": "transformers", "token_count": 11274 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Granite model.""" import unittest from parameterized import parameterized from transformers import GraniteConfig, is_torch_available, set_seed from transformers.testing_utils import ( require_read_token, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GraniteForCausalLM, GraniteModel, ) from transformers.models.granite.modeling_granite import ( GraniteRotaryEmbedding, ) class GraniteModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return GraniteConfig( 
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = GraniteModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = GraniteModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = GraniteForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = GraniteForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = 
output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GraniteModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( GraniteModel, GraniteForCausalLM, ) if is_torch_available() else () ) all_generative_model_classes = (GraniteForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GraniteModel, "text-generation": GraniteForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] def setUp(self): self.model_tester = GraniteModelTester(self) self.config_tester = ConfigTester(self, config_class=GraniteConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip("Granite buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = GraniteModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = GraniteModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) def test_model_rope_scaling(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn(1, dtype=torch.float32, device=torch_device) # used exlusively to get the dtype and the device position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device) position_ids_short = position_ids_short.unsqueeze(0) position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device) position_ids_long = position_ids_long.unsqueeze(0) # Sanity check original RoPE original_rope = GraniteRotaryEmbedding(config=config).to(torch_device) original_cos_short, original_sin_short = original_rope(x, position_ids_short) original_cos_long, original_sin_long = original_rope(x, position_ids_long) torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" config.rope_scaling = {"type": "linear", "factor": scaling_factor} linear_scaling_rope = GraniteRotaryEmbedding(config=config).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short) linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long) torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :]) torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed. 
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_scaling = {"type": "dynamic", "factor": scaling_factor} ntk_scaling_rope = GraniteRotaryEmbedding(config=config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) # Sanity check Yarn RoPE scaling # Scaling should be over the entire input config.rope_scaling = {"type": "yarn", "factor": scaling_factor} yarn_scaling_rope = GraniteRotaryEmbedding(config=config).to(torch_device) yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short) yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long) torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :]) torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :]) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_short, original_cos_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_long, original_sin_long) @require_torch_gpu class GraniteIntegrationTest(unittest.TestCase): # This variable is used to determine which CUDA device are we using for our runners (A10 or T4) # Depending on the hardware we get different logits / generations cuda_compute_capability_major_version = None @classmethod def setUpClass(cls): if is_torch_available() and torch.cuda.is_available(): # 8 is for A100 / A10 and 7 for T4 cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0] @slow @require_read_token def test_model_3b_logits_bf16(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = GraniteForCausalLM.from_pretrained( "ibm/PowerLM-3b", device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager" ) with torch.no_grad(): out = model(torch.tensor([input_ids]).to(torch_device)) # Expected mean on dim = -1 # fmt: off EXPECTED_MEAN = torch.tensor([[-1.9798, -3.1626, -2.8062, -2.3777, -2.7091, -2.2338, -2.5924, -2.3974]]) torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.mean(-1), rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:15] EXPECTED_SLICE = torch.tensor([[4.8750, -2.1875, -2.1875, -2.1875, -2.1875, -2.8438, -2.1875, -2.1875, -2.1875, -2.1875, -2.1875, -2.1875, -2.1875, -2.1875, -2.1875]]) # fmt: on self.assertTrue( torch.allclose( EXPECTED_SLICE.to(torch_device), out.logits[0, 0, :15].float(), atol=1e-3, rtol=1e-3, ) ) @slow @require_read_token def test_model_3b_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = GraniteForCausalLM.from_pretrained("ibm/PowerLM-3b", device_map="auto", torch_dtype=torch.float16) with torch.no_grad(): out = model(torch.tensor([input_ids]).to(torch_device)) # fmt: off # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-2.0984, -3.1294, -2.8153, -2.3568, -2.7337, -2.2624, -2.6016, -2.4022]]) 
torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), rtol=1e-2, atol=1e-2)
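# A minimal sketch (assumed dims and base, not the Granite rotary implementation) of the linear RoPE
# scaling property that `test_model_rope_scaling` above verifies: with a scaling factor s, the rotary
# angles at position p equal the unscaled angles at position p / s.
import torch

dim, base, factor = 8, 10000.0, 10
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))

def rope_angles(positions, scale=1.0):
    # angle[p, i] = (p / scale) * inv_freq[i]; cos/sin of these angles form the rotation
    return torch.outer(positions.float() / scale, inv_freq)

for p in range(0, 100, factor):
    scaled = rope_angles(torch.tensor([p]), scale=factor)  # linearly scaled position p
    original = rope_angles(torch.tensor([p // factor]))    # unscaled position p / factor
    torch.testing.assert_close(scaled, original)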
transformers/tests/models/granite/test_modeling_granite.py/0
{ "file_path": "transformers/tests/models/granite/test_modeling_granite.py", "repo_id": "transformers", "token_count": 8771 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Llava-NeXT-Video model.""" import unittest import numpy as np from huggingface_hub import hf_hub_download from parameterized import parameterized from transformers import ( AutoProcessor, LlavaNextVideoConfig, LlavaNextVideoForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( cleanup, require_bitsandbytes, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, ) if is_torch_available(): import torch if is_vision_available(): from PIL import Image class LlavaNextVideoVisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=0, video_token_index=1, projector_hidden_act="gelu", seq_length=7, vision_feature_select_strategy="default", vision_feature_layer=-1, text_config={ "model_type": "llama", "seq_length": 7, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 580, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 2, }, is_training=True, vision_config={ "image_size": 8, "patch_size": 4, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.video_token_index = video_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.pad_token_id = text_config["pad_token_id"] self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = 3 self.image_size = 30 self.image_grid_pinpoints = [[16, 16]] self.num_image_tokens = 24 self.num_video_tokens = 8 self.seq_length = seq_length + self.num_image_tokens + self.num_video_tokens def get_config(self): return LlavaNextVideoConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, video_token_index=self.video_token_index, 
projector_hidden_act=self.projector_hidden_act, vision_feature_select_strategy=self.vision_feature_select_strategy, vision_feature_layer=self.vision_feature_layer, image_grid_pinpoints=self.image_grid_pinpoints, video_seq_length=self.num_video_tokens, image_seq_length=self.num_image_tokens, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, 5, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) pixel_values_videos = floats_tensor( [ self.batch_size, 8, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values, pixel_values_videos def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_values_videos = self.prepare_config_and_inputs() input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2 attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) input_ids[input_ids == config.image_token_index] = self.pad_token_id input_ids[input_ids == config.video_token_index] = self.pad_token_id input_ids[:, : self.num_image_tokens] = config.image_token_index input_ids[:, self.num_image_tokens : self.num_video_tokens + self.num_image_tokens] = config.video_token_index inputs_dict = { "pixel_values": pixel_values, "pixel_values_videos": pixel_values_videos, "image_sizes": torch.tensor( [[self.vision_config["image_size"], self.vision_config["image_size"]]] * self.batch_size ), "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict def create_and_check_llava_next_video_model_fp16_forward( self, config, input_ids, pixel_values, pixel_values_videos, attention_mask, image_sizes ): model = LlavaNextVideoForConditionalGeneration(config=config) model.to(torch_device) model.half() model.eval() logits = model( input_ids=input_ids, attention_mask=attention_mask, image_sizes=image_sizes, pixel_values=pixel_values.to(torch.bfloat16), pixel_values_videos=pixel_values_videos.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) def create_and_check_llava_next_video_model_fp16_autocast_forward( self, config, input_ids, pixel_values, pixel_values_videos, attention_mask, image_sizes ): config.torch_dtype = torch.float16 model = LlavaNextVideoForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type="cuda", dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, image_sizes=image_sizes, pixel_values=pixel_values.to(torch.bfloat16), pixel_values_videos=pixel_values_videos.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class LlavaNextVideoForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `LlavaNextVideoForConditionalGeneration`. 
""" all_model_classes = (LlavaNextVideoForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (LlavaNextVideoForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_head_masking = False _is_composite = True def setUp(self): self.model_tester = LlavaNextVideoVisionText2TextModelTester(self) common_properties = ["image_token_index", "video_token_index", "vision_feature_layer", "image_seq_length"] self.config_tester = ConfigTester( self, config_class=LlavaNextVideoConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "image_newline" in name: continue elif param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] del inputs["pixel_values_videos"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs # while some other models require pixel_values to be present def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] del inputs["pixel_values_videos"] inputs_embeds = model.get_input_embeddings()(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] torch.testing.assert_close(out_embeds, out_ids) def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) _ = model(**input_dict) # successfull forward with no modifications # remove one image but leave the image token in text input_dict["pixel_values"] = input_dict["pixel_values"][-1:, ...] input_dict["image_sizes"] = input_dict["image_sizes"][-1:, ...] 
with self.assertRaises(ValueError): _ = model(**input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = input_dict["input_ids"][:1] pixel_values = input_dict["pixel_values"][:1] image_sizes = input_dict["image_sizes"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_sizes = torch.cat([image_sizes, image_sizes], dim=0) _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes) @parameterized.expand( [ (-1,), ([-1],), ([-1, -2],), ], ) def test_vision_feature_layers(self, vision_feature_layer): """ Test that we can use either one vision feature layer, or a list of vision feature layers. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.vision_feature_layer = vision_feature_layer num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer) hidden_size = config.vision_config.hidden_size expected_features = hidden_size * num_feature_layers for model_class in self.all_model_classes: model = model_class(config).to(torch_device) # We should have the right number of input features, # and should be able to run a forward pass without exploding assert model.multi_modal_projector.linear_1.in_features == expected_features model(**input_dict) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Feedforward chunking is not yet supported") def test_feed_forward_chunking(self): pass @unittest.skip(reason="CPU offload is not yet supported") def test_cpu_offload(self): pass @unittest.skip( reason="Compile not yet supported because in LLava models (https://github.com/huggingface/transformers/issues/29891)" ) def test_sdpa_can_compile_dynamic(self): pass @unittest.skip( reason="Compile not yet supported because in LLava models (https://github.com/huggingface/transformers/issues/29891)" ) def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip("FlashAttention only support fp16 and bf16 data type") def test_flash_attn_2_fp32_ln(self): pass @unittest.skip( "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. 
Can be tested as part of LLM test" ) def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @require_torch class LlavaNextVideoForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf") image_file = hf_hub_download( repo_id="raushan-testing-hf/images_test", filename="llava_v1_5_radar.jpg", repo_type="dataset" ) video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) self.image = Image.open(image_file) self.video = np.load(video_file) self.prompt_image = "USER: <image>\nWhat is shown in this image? ASSISTANT:" self.prompt_video = "USER: <video>\nWhy is this video funny? ASSISTANT:" def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_bitsandbytes def test_small_model_integration_test(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", load_in_4bit=True, cache_dir="./" ) inputs = self.processor(self.prompt_video, videos=self.video, return_tensors="pt") # verify single forward pass inputs = inputs.to(torch_device) with torch.no_grad(): output = model(**inputs) # verify generation output = model.generate(**inputs, do_sample=False, max_new_tokens=40) EXPECTED_DECODED_TEXT = 'USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a book while another child is attempting to read the same book. The child who is reading the book seems' # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_batch(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", load_in_4bit=True, cache_dir="./" ) inputs = self.processor( [self.prompt_video, self.prompt_video], videos=[self.video, self.video], return_tensors="pt", padding=True, ).to(torch_device) output = model.generate(**inputs, do_sample=False, max_new_tokens=20) EXPECTED_DECODED_TEXT = [ 'USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a', 'USER: \nWhy is this video funny? ASSISTANT: The humor in this video comes from the unexpected and somewhat comical situation of a young child reading a' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_batch_different_vision_types(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", load_in_4bit=True, cache_dir="./", ) inputs = self.processor( [self.prompt_image, self.prompt_video], images=self.image, videos=self.video, return_tensors="pt", padding=True, ).to(torch_device) # check loss when labels are passed inputs["labels"] = inputs["input_ids"].clone() with torch.no_grad(): output = model(**inputs) self.assertTrue(output.loss is not None) # verify generation output = model.generate(**inputs, do_sample=False, max_new_tokens=50) EXPECTED_DECODED_TEXT = 'USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a machine learning model\'s performance on a task, likely related to natural language processing or text understanding. 
It shows a scatter plot with two axes, one labeled "BLIP-2"' # fmt: skip self.assertEqual(self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT) @slow @require_bitsandbytes def test_small_model_integration_test_batch_matches_single(self): model = LlavaNextVideoForConditionalGeneration.from_pretrained( "llava-hf/LLaVA-NeXT-Video-7B-hf", load_in_4bit=True, cache_dir="./" ) inputs_batched = self.processor( [self.prompt_video, self.prompt_image], images=[self.image], videos=[self.video], return_tensors="pt", padding=True, ).to(torch_device) inputs_single = self.processor(self.prompt_video, videos=[self.video], return_tensors="pt").to(torch_device) # verify generation output_batched = model.generate(**inputs_batched, do_sample=False, max_new_tokens=50) output_single = model.generate(**inputs_single, do_sample=False, max_new_tokens=50) self.assertEqual( self.processor.decode(output_batched[0], skip_special_tokens=True), self.processor.decode(output_single[0], skip_special_tokens=True), )
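# A minimal sketch (hypothetical helper, not the LlavaNextVideo modeling code) of the bookkeeping
# that `test_mismatching_num_image_tokens` above exercises: the number of image placeholder tokens
# in the text must equal the number of image feature slots, otherwise the vision features cannot be
# scattered into the sequence and a ValueError is raised.
import torch

def check_image_tokens(input_ids, num_images, image_token_index, tokens_per_image):
    n_placeholders = int((input_ids == image_token_index).sum())
    expected = num_images * tokens_per_image
    if n_placeholders != expected:
        raise ValueError(f"Found {n_placeholders} image placeholder tokens for {expected} image feature tokens")

ids = torch.tensor([[5, 7, 1, 1, 1, 9]])  # three placeholders for token index 1
check_image_tokens(ids, num_images=1, image_token_index=1, tokens_per_image=3)  # consistent: passes
try:
    check_image_tokens(ids, num_images=2, image_token_index=1, tokens_per_image=3)  # mismatch
except ValueError as err:
    print(err)  # raised, as the test above expects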
transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py/0
{ "file_path": "transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py", "repo_id": "transformers", "token_count": 9480 }
# coding=utf-8 # Copyright 2018 LXMERT Authors, The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import numpy as np from transformers import LxmertConfig, is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, ) if is_tf_available(): import tensorflow as tf class LxmertModelTester: def __init__( self, parent, vocab_size=300, hidden_size=28, num_attention_heads=2, num_labels=2, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, num_qa_labels=30, num_object_labels=16, num_attr_labels=4, num_visual_features=10, l_layers=2, x_layers=1, r_layers=1, visual_feat_dim=128, visual_pos_dim=4, visual_loss_normalizer=6.67, seq_length=20, batch_size=4, is_training=True, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, use_token_type_ids=True, use_lang_mask=True, output_attentions=False, output_hidden_states=False, scope=None, ): self.parent = parent self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_labels = num_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.pad_token_id = pad_token_id self.num_qa_labels = num_qa_labels self.num_object_labels = num_object_labels self.num_attr_labels = num_attr_labels self.l_layers = l_layers self.x_layers = x_layers self.r_layers = r_layers self.visual_feat_dim = visual_feat_dim self.visual_pos_dim = visual_pos_dim self.visual_loss_normalizer = visual_loss_normalizer self.seq_length = seq_length self.batch_size = batch_size self.is_training = is_training self.use_lang_mask = use_lang_mask self.task_matched = task_matched self.task_mask_lm = task_mask_lm self.task_obj_predict = task_obj_predict self.task_qa = task_qa self.visual_obj_loss = visual_obj_loss self.visual_attr_loss = visual_attr_loss self.visual_feat_loss = visual_feat_loss self.num_visual_features = num_visual_features self.use_token_type_ids = use_token_type_ids self.output_attentions = output_attentions self.output_hidden_states = output_hidden_states self.scope = scope 
self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} def prepare_config_and_inputs(self): output_attentions = self.output_attentions input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size) visual_feats = torch.rand(self.batch_size, self.num_visual_features, self.visual_feat_dim, device=torch_device) bounding_boxes = torch.rand(self.batch_size, self.num_visual_features, 4, device=torch_device) input_mask = None if self.use_lang_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) obj_labels = None if self.task_obj_predict: obj_labels = {} if self.visual_attr_loss and self.task_obj_predict: obj_labels["attr"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ) if self.visual_feat_loss and self.task_obj_predict: obj_labels["feat"] = ( ids_tensor( [self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features ), ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features), ) if self.visual_obj_loss and self.task_obj_predict: obj_labels["obj"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ) ans = None if self.task_qa: ans = ids_tensor([self.batch_size], self.num_qa_labels) masked_lm_labels = None if self.task_mask_lm: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) matched_label = None if self.task_matched: matched_label = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) def get_config(self): return LxmertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_attention_heads=self.num_attention_heads, num_labels=self.num_labels, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, num_qa_labels=self.num_qa_labels, num_object_labels=self.num_object_labels, num_attr_labels=self.num_attr_labels, l_layers=self.l_layers, x_layers=self.x_layers, r_layers=self.r_layers, visual_feat_dim=self.visual_feat_dim, visual_pos_dim=self.visual_pos_dim, visual_loss_normalizer=self.visual_loss_normalizer, task_matched=self.task_matched, task_mask_lm=self.task_mask_lm, task_obj_predict=self.task_obj_predict, task_qa=self.task_qa, visual_obj_loss=self.visual_obj_loss, visual_attr_loss=self.visual_attr_loss, visual_feat_loss=self.visual_feat_loss, output_attentions=self.output_attentions, output_hidden_states=self.output_hidden_states, ) def create_and_check_lxmert_model( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, 
token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=not output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, return_dict=False) result = model(input_ids, visual_feats, bounding_boxes, return_dict=True) self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual( result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size) ) self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_lxmert_for_question_answering( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, labels=ans) result = model( input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.question_answering_score.shape, (self.batch_size, self.num_qa_labels)) def create_and_check_lxmert_for_pretraining( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = LxmertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, output_attentions=not output_attentions, return_dict=False, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, obj_labels=obj_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, matched_label=matched_label, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def resize_lxmert_num_qa_labels( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): start_labels = 
config.num_qa_labels num_large_labels = config.num_qa_labels * 2 num_small_labels = int(config.num_qa_labels / 2) less_labels_ans = ids_tensor([self.batch_size], num_small_labels) more_labels_ans = ids_tensor([self.batch_size], num_large_labels) model_pretrain = LxmertForPreTraining(config=config).to(torch_device) model_qa = LxmertForQuestionAnswering(config=config).to(torch_device) config.num_labels = num_small_labels end_labels = config.num_labels result_pretrain = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result_qa = model_qa( input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_pretrain.resize_num_qa_labels(num_small_labels) model_qa.resize_num_qa_labels(num_small_labels) result_pretrain_less = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=less_labels_ans, ) result_qa_less = model_qa( input_ids, visual_feats, bounding_boxes, labels=less_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_pretrain.resize_num_qa_labels(num_large_labels) model_qa.resize_num_qa_labels(num_large_labels) result_pretrain_more = model_pretrain( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=more_labels_ans, ) result_qa_more = model_qa( input_ids, visual_feats, bounding_boxes, labels=more_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask, ) model_qa_labels = model_qa.num_qa_labels self.parent.assertNotEqual(start_labels, end_labels) self.parent.assertNotEqual(model_qa_labels, start_labels) self.parent.assertEqual(result_qa.question_answering_score.shape, (self.batch_size, start_labels)) self.parent.assertEqual(result_pretrain.question_answering_score.shape, (self.batch_size, start_labels)) self.parent.assertEqual(result_qa_less.question_answering_score.shape, (self.batch_size, num_small_labels)) self.parent.assertEqual( result_pretrain_less.question_answering_score.shape, (self.batch_size, num_small_labels) ) self.parent.assertEqual(result_qa_more.question_answering_score.shape, (self.batch_size, num_large_labels)) self.parent.assertEqual( result_pretrain_more.question_answering_score.shape, (self.batch_size, num_large_labels) ) def prepare_config_and_inputs_for_common(self, return_obj_labels=False): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": bounding_boxes, "token_type_ids": token_type_ids, "attention_mask": input_mask, } if return_obj_labels: inputs_dict["obj_labels"] = obj_labels else: config.task_obj_predict = False return config, inputs_dict @require_torch class LxmertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (LxmertModel, LxmertForPreTraining, LxmertForQuestionAnswering) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": LxmertModel, "question-answering": LxmertForQuestionAnswering} if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_torchscript = False # overwrite function because qa models take a different input label shape def _prepare_for_class(self, inputs_dict, model_class,
return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): # special case for models like BERT that use multi-loss training for PreTraining inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = LxmertModelTester(self) self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_lxmert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_model(*config_and_inputs) def test_lxmert_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_question_answering(*config_and_inputs) def test_lxmert_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs) def test_lxmert_question_answering_labels_resize(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.resize_lxmert_num_qa_labels(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "unc-nlp/lxmert-base-uncased" model = LxmertModel.from_pretrained(model_name) model.to(torch_device) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) 
attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # 2 hidden states were added self.assertEqual(out_len + 2, len(outputs)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1] self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1) self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1) seq_length = self.model_tester.seq_length num_visual_features = self.model_tester.num_visual_features self.assertListEqual( list(language_hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) self.assertListEqual( list(vision_hidden_states[0].shape[-2:]), [num_visual_features, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, 
model_class) outputs = model(**inputs) hidden_states_lang = outputs.language_hidden_states[0] attentions_lang = outputs.language_attentions[0] hidden_states_vision = outputs.vision_hidden_states[0] attentions_vision = outputs.vision_attentions[0] hidden_states_lang.retain_grad() attentions_lang.retain_grad() hidden_states_vision.retain_grad() attentions_vision.retain_grad() outputs.language_output.flatten()[0].backward(retain_graph=True) outputs.vision_output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states_lang.grad) self.assertIsNotNone(attentions_lang.grad) self.assertIsNotNone(hidden_states_vision.grad) self.assertIsNotNone(attentions_vision.grad) def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict = {} for key, value in pt_inputs_dict.items(): # skip key that does not exist in tf if isinstance(value, dict): tf_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value) elif isinstance(value, (list, tuple)): tf_inputs_dict[key] = tuple(self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value) elif isinstance(value, bool): tf_inputs_dict[key] = value elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) elif key == "pixel_values": tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) elif key == "input_features": tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) # other general float inputs elif value.is_floating_point(): tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32) else: tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.int32) return tf_inputs_dict @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_checkpoints(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_no_safetensors(self): pass @unittest.skip( reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass @require_torch class LxmertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased") input_ids = torch.tensor([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]]) num_visual_features = 10 _, visual_feats = np.random.seed(0), np.random.rand(1, num_visual_features, model.config.visual_feat_dim) _, visual_pos = np.random.seed(0), np.random.rand(1, num_visual_features, 4) visual_feats = torch.as_tensor(visual_feats, dtype=torch.float32) visual_pos = torch.as_tensor(visual_pos, dtype=torch.float32) output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0] expected_shape = torch.Size([1, 11, 768]) self.assertEqual(expected_shape, output.shape) expected_slice = torch.tensor( [[[0.2417, -0.9807, 0.1480], [1.2541, -0.8320, 0.5112], [1.4070, -1.1052, 0.6990]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/lxmert/test_modeling_lxmert.py/0
{ "file_path": "transformers/tests/models/lxmert/test_modeling_lxmert.py", "repo_id": "transformers", "token_count": 15333 }
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_bs4 from transformers.utils import is_bs4_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bs4_available(): from transformers import MarkupLMFeatureExtractor class MarkupLMFeatureExtractionTester: def __init__(self, parent): self.parent = parent def prepare_feat_extract_dict(self): return {} def get_html_strings(): html_string_1 = """<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>""" html_string_2 = """ <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> """ return [html_string_1, html_string_2] @require_bs4 class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None def setUp(self): self.feature_extract_tester = MarkupLMFeatureExtractionTester(self) @property def feat_extract_dict(self): return self.feature_extract_tester.prepare_feat_extract_dict() def test_call(self): # Initialize feature_extractor feature_extractor = self.feature_extraction_class() # Test not batched input html_string = get_html_strings()[0] encoding = feature_extractor(html_string) # fmt: off expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']] expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']] # fmt: on self.assertEqual(encoding.nodes, expected_nodes) self.assertEqual(encoding.xpaths, expected_xpaths) # Test batched html_strings = get_html_strings() encoding = feature_extractor(html_strings) # fmt: off expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']] expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']] self.assertEqual(len(encoding.nodes), 2) self.assertEqual(len(encoding.xpaths), 2) self.assertEqual(encoding.nodes, expected_nodes) self.assertEqual(encoding.xpaths, expected_xpaths)
transformers/tests/models/markuplm/test_feature_extraction_markuplm.py/0
{ "file_path": "transformers/tests/models/markuplm/test_feature_extraction_markuplm.py", "repo_id": "transformers", "token_count": 1478 }
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class MPNetModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def get_large_model_config(self): return MPNetConfig.from_pretrained("microsoft/mpnet-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return MPNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_mpnet_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MPNetModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_mpnet_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MPNetForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_mpnet_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MPNetForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_mpnet_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = MPNetForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_mpnet_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MPNetForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) 
test_pruning = False test_resize_embeddings = True def setUp(self): self.model_tester = MPNetModelTester(self) self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_mpnet_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs) @unittest.skip(reason="TFMPNet adds poolers to all models, unlike the PT model class.") def test_tf_from_pt_safetensors(self): return @require_torch class MPNetModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = MPNetModel.from_pretrained("microsoft/mpnet-base") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/mpnet/test_modeling_mpnet.py/0
{ "file_path": "transformers/tests/models/mpnet/test_modeling_mpnet.py", "repo_id": "transformers", "token_count": 4634 }
# coding=utf-8 # Copyright 2024, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Musicgen Melody model.""" import copy import inspect import math import tempfile import unittest import numpy as np from parameterized import parameterized from pytest import mark from transformers import ( EncodecConfig, MusicgenMelodyConfig, MusicgenMelodyDecoderConfig, PretrainedConfig, T5Config, ) from transformers.testing_utils import ( is_torch_available, is_torchaudio_available, require_flash_attn, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_gpu, require_torch_sdpa, require_torchaudio, set_config_for_less_flaky_test, set_model_for_less_flaky_test, set_model_tester_for_less_flaky_test, slow, torch_device, ) from transformers.utils import cached_property, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, sdpa_kernel from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MusicgenMelodyForCausalLM, MusicgenMelodyForConditionalGeneration, MusicgenMelodyModel, set_seed, ) if is_torchaudio_available(): from transformers import MusicgenMelodyProcessor def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def prepare_musicgen_melody_decoder_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): if attention_mask is None: attention_mask = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])[:, 0, :] attention_mask = attention_mask.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) if encoder_attention_mask is None and encoder_hidden_states is not None: encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, "head_mask": head_mask, } class MusicgenMelodyDecoderTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers because of #29297 seq_length=7, is_training=True, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, 
max_position_embeddings=100, pad_token_id=99, bos_token_id=99, num_codebooks=4, conditional_seq_length=4, audio_channels=1, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.num_codebooks = num_codebooks self.conditional_seq_length = conditional_seq_length self.encoder_seq_length = conditional_seq_length + seq_length self.audio_channels = audio_channels def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size) encoder_hidden_states = floats_tensor([self.batch_size, self.conditional_seq_length, self.hidden_size]) config = self.get_config() inputs_dict = prepare_musicgen_melody_decoder_inputs_dict( config, input_ids, encoder_hidden_states=encoder_hidden_states, ) return config, inputs_dict def get_config(self): config = MusicgenMelodyDecoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, d_ff=self.intermediate_size, pad_token_id=self.pad_token_id, decoder_start_token_id=self.bos_token_id, bos_token_id=self.bos_token_id, num_codebooks=self.num_codebooks, tie_word_embeddings=False, audio_channels=self.audio_channels, ) return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch class MusicgenMelodyDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (MusicgenMelodyModel, MusicgenMelodyForCausalLM) if is_torch_available() else () greedy_sample_model_classes = ( (MusicgenMelodyForCausalLM,) if is_torch_available() else () ) # the model uses a custom generation method so we only run a specific subset of the generation tests test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = MusicgenMelodyDecoderTester(self) self.config_tester = ConfigTester(self, config_class=MusicgenMelodyDecoderConfig, hidden_size=16) def test_config(self): self.config_tester.run_common_tests() # special case for labels # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenDecoderTest._prepare_for_class def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_codebooks), dtype=torch.long, device=torch_device, ) return inputs_dict # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenDecoderTest.check_training_gradient_checkpointing with Musicgen->MusicgenMelody def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = 
MusicgenMelodyForCausalLM(config) model.to(torch_device) model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() # Contrarily to the initial method, we don't unfreeze freezed parameters. # Indeed, sinusoidal position embeddings have frozen weights that should stay frozen. optimizer = torch.optim.SGD(model.parameters(), lr=0.01) inputs = self._prepare_for_class(inputs_dict, MusicgenMelodyForCausalLM, return_labels=True) loss = model(**inputs).loss loss.backward() optimizer.step() for k, v in model.named_parameters(): if v.requires_grad: self.assertTrue(v.grad is not None, f"{k} in {MusicgenMelodyForCausalLM.__name__} has no gradient!") # override since we have to compute the input embeddings over codebooks def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] embed_tokens = model.get_input_embeddings() input_ids = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1]) inputs["inputs_embeds"] = sum( [embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)] ) with torch.no_grad(): model(**inputs)[0] # override since we have embeddings / LM heads over multiple codebooks def test_model_get_set_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) first_embed = model.get_input_embeddings()[0] self.assertIsInstance(first_embed, torch.nn.Embedding) lm_heads = model.get_output_embeddings() self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) @unittest.skip(reason="MusicGen melody does not use inputs_embeds") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="this model doesn't support all arguments tested") def test_model_outputs_equivalence(self): pass @unittest.skip(reason="this model has multiple inputs embeds and lm heads that should not be tied") def test_tie_model_weights(self): pass @unittest.skip(reason="this model has multiple inputs embeds and lm heads that should not be tied") def test_tied_weights_keys(self): pass def _get_logits_processor_kwargs(self, do_sample=False, config=None): logits_processor_kwargs = {} return logits_processor_kwargs def test_greedy_generate_stereo_outputs(self): original_audio_channels = self.model_tester.audio_channels self.model_tester.audio_channels = 2 super().test_greedy_generate_dict_outputs() self.model_tester.audio_channels = original_audio_channels @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenDecoderTest.test_flash_attn_2_inference_equivalence def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", ) model_fa.to(torch_device) model = 
model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) # Ignore copy dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: # Ignore copy dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 # Ignore copy outputs = model(dummy_input, output_hidden_states=True) # Ignore copy outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) # Ignore copy other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenDecoderTest.test_flash_attn_2_inference_equivalence_right_padding def test_flash_attn_2_inference_equivalence_right_padding(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) # Ignore copy dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: # Ignore copy dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input) outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) # Ignore copy other_inputs = { "output_hidden_states": True, } if 
dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa # Copied from tests.test_modeling_common.ModelTesterMixin.test_eager_matches_sdpa_inference def test_eager_matches_sdpa_inference(self, torch_dtype: str): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_dtype == "float16" and not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if torch_dtype == "bfloat16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Not sure whether it's fine to put torch.XXX in a decorator if torch is not available so hacking it here instead. if torch_dtype == "float16": torch_dtype = torch.float16 elif torch_dtype == "bfloat16": torch_dtype = torch.bfloat16 elif torch_dtype == "float32": torch_dtype = torch.float32 atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, ("cuda", True, torch.float16): 5e-3, } def get_mean_reldiff(failcase, x, ref, atol, rtol): return f"{failcase}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" set_model_tester_for_less_flaky_test(self) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) model = model_class(config) is_encoder_decoder = model.config.is_encoder_decoder with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device) model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) set_model_for_less_flaky_test(model_eager) set_model_for_less_flaky_test(model_sdpa) # We use these for loops instead of 
parameterized.expand just for the interest of avoiding loading/saving 8 times the model, # but it would be nicer to have an efficient way to use parameterized.expand fail_cases = [] for padding_side in ["left", "right"]: for use_mask in [False, True]: for batch_size in [7]: # Ignore copy batch_size_input_ids = self.model_tester.num_codebooks * batch_size dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: dummy_input = dummy_input.to(torch_dtype) # Ignore copy dummy_input = dummy_input[:batch_size_input_ids] # Ignore copy if dummy_input.shape[0] != batch_size_input_ids: if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: # Ignore copy extension = torch.rand( batch_size_input_ids - dummy_input.shape[0], *dummy_input.shape[1:], dtype=torch_dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) else: # Ignore copy extension = torch.randint( high=5, size=(batch_size_input_ids - dummy_input.shape[0], *dummy_input.shape[1:]), dtype=dummy_input.dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) if not use_mask: dummy_attention_mask = None else: dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is None: if is_encoder_decoder: seqlen = inputs_dict.get("decoder_input_ids", dummy_input).shape[-1] else: seqlen = dummy_input.shape[-1] dummy_attention_mask = ( torch.ones(batch_size, seqlen).to(torch.int64).to(torch_device) ) dummy_attention_mask = dummy_attention_mask[:batch_size] if dummy_attention_mask.shape[0] != batch_size: extension = torch.ones( batch_size - dummy_attention_mask.shape[0], *dummy_attention_mask.shape[1:], dtype=dummy_attention_mask.dtype, device=torch_device, ) dummy_attention_mask = torch.cat((dummy_attention_mask, extension), dim=0) dummy_attention_mask = dummy_attention_mask.to(torch_device) dummy_attention_mask[:] = 1 if padding_side == "left": dummy_attention_mask[-1, :2] = 0 dummy_attention_mask[-1, 2:] = 1 elif padding_side == "right": dummy_attention_mask[-1, -2:] = 0 dummy_attention_mask[-1, :-2] = 1 for enable_kernels in [False, True]: failcase = f"padding_side={padding_side}, use_mask={use_mask}, batch_size={batch_size}, enable_kernels={enable_kernels}" other_inputs = { "output_hidden_states": True, } # Otherwise fails for e.g. WhisperEncoderModel if "attention_mask" in inspect.signature(model_eager.forward).parameters: other_inputs["attention_mask"] = dummy_attention_mask # TODO: test gradients as well (& for FA2 as well!) with torch.no_grad(): with sdpa_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): outputs_eager = model_eager(dummy_input, **other_inputs) outputs_sdpa = model_sdpa(dummy_input, **other_inputs) logits_eager = ( outputs_eager.hidden_states[-1] if not is_encoder_decoder else outputs_eager.decoder_hidden_states[-1] ) logits_sdpa = ( outputs_sdpa.hidden_states[-1] if not is_encoder_decoder else outputs_sdpa.decoder_hidden_states[-1] ) if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, torch_dtype] rtol = rtols[torch_device, enable_kernels, torch_dtype] elif torch_device == "xpu": # As of PyTorch 2.5 XPU backend supports only torch.nn.attention.SDPBackend.MATH # which is implemented on PyTorch level using aten operators and is # device agnostic with respect to implementation of each aten operator. 
atol = atols["cuda", False, torch_dtype] rtol = rtols["cuda", False, torch_dtype] else: atol = 1e-7 rtol = 1e-4 # Masked tokens output slightly deviates - we don't mind that. if use_mask: _logits_sdpa = torch.zeros_like(input=logits_sdpa) _logits_eager = torch.zeros_like(input=logits_eager) _logits_sdpa[:-1] = logits_sdpa[:-1] _logits_eager[:-1] = logits_eager[:-1] if padding_side == "left": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, 2:] _logits_eager[-1:, 2:] = logits_eager[-1:, 2:] elif padding_side == "right": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, :-2] _logits_eager[-1:, 2:] = logits_eager[-1:, :-2] logits_sdpa = _logits_sdpa logits_eager = _logits_eager results = [ torch.allclose(_logits_sdpa, _logits_eager, atol=atol, rtol=rtol) for (_logits_sdpa, _logits_eager) in zip(logits_sdpa, logits_eager) ] # If 80% batch elements have matched results, it's fine if np.mean(results) < 0.8: fail_cases.append( get_mean_reldiff(failcase, logits_sdpa, logits_eager, atol, rtol) ) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) def prepare_musicgen_melody_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, labels=None, ): if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.reshape( -1, config.decoder.num_codebooks, decoder_input_ids.shape[-1] )[:, 0, :] decoder_attention_mask = decoder_attention_mask.ne(config.decoder.pad_token_id) if head_mask is None: head_mask = torch.ones( config.text_encoder.num_hidden_layers, config.text_encoder.num_attention_heads, device=torch_device ) if decoder_head_mask is None: decoder_head_mask = torch.ones( config.decoder.num_hidden_layers, config.decoder.num_attention_heads, device=torch_device ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "labels": labels, } class MusicgenMelodyTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers because of #29297 seq_length=7, is_training=True, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, pad_token_id=99, bos_token_id=99, num_codebooks=4, num_filters=4, codebook_size=128, conditional_seq_length=3, chroma_length=24, audio_channels=1, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.num_codebooks = num_codebooks self.num_filters = num_filters self.codebook_size = codebook_size self.conditional_seq_length = conditional_seq_length self.chroma_length = chroma_length self.encoder_seq_length = conditional_seq_length + seq_length self.audio_channels = audio_channels def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.conditional_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size * 
self.num_codebooks, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_musicgen_melody_inputs_dict(config, input_ids, decoder_input_ids=decoder_input_ids) return config, inputs_dict def get_config(self): text_encoder_config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.intermediate_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, ) audio_encoder_config = EncodecConfig( hidden_size=self.vocab_size, compress=1, num_filters=self.num_filters, codebook_size=self.codebook_size, codebook_dim=self.vocab_size, ) decoder_config = MusicgenMelodyDecoderConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, pad_token_id=self.pad_token_id, decoder_start_token_id=self.bos_token_id, bos_token_id=self.bos_token_id, num_codebooks=self.num_codebooks, tie_word_embeddings=False, audio_channels=self.audio_channels, ) config = MusicgenMelodyConfig.from_sub_models_config( text_encoder_config, audio_encoder_config, decoder_config, chroma_length=self.chroma_length ) return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict @require_torch # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenTest with Musicgen->MusicgenMelody, musicgen->musicgen_melody, EncoderDecoder->DecoderOnly, input_values->input_features class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MusicgenMelodyForConditionalGeneration,) if is_torch_available() else () greedy_sample_model_classes = (MusicgenMelodyForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = {"text-to-audio": MusicgenMelodyForConditionalGeneration} if is_torch_available() else {} test_pruning = False # training is not supported yet for MusicGen test_headmasking = False test_resize_embeddings = False # not to test torchscript as the model tester doesn't prepare `input_features` and `padding_mask` # (and `torchscript` hates `None` values). test_torchscript = False _is_composite = True def setUp(self): self.model_tester = MusicgenMelodyTester(self) # special case for labels def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_codebooks), dtype=torch.long, device=torch_device, ) return inputs_dict def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() # The audio encoder weights are not used during the forward pass (only during the generate pass) # So we need to freeze it to be able to train. 
model.freeze_audio_encoder() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() optimizer.step() for k, v in model.named_parameters(): if v.requires_grad: self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!") # Ignore copy def _check_output_with_attentions(self, outputs, config, input_ids, decoder_input_ids): decoder_config = config.decoder decoder_attentions = outputs["attentions"] num_decoder_layers = decoder_config.num_hidden_layers self.assertEqual(len(decoder_attentions), num_decoder_layers) output_shape = decoder_input_ids.shape[-1] + input_ids.shape[-1] + self.model_tester.chroma_length self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, output_shape, output_shape), ) def check_musicgen_melody_model_output_attentions( self, model_class, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs, ): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, output_attentions=True, **kwargs, ) self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) # Ignore copy def check_musicgen_melody_model_output_attentions_from_config( self, model_class, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs, ): # Similar to `check_musicgen_melody_model_output_attentions`, but with `output_attentions` triggered from the # config file. Contrarily to most models, changing the model's config won't work -- the defaults are loaded # from the inner models' configurations. 
config.output_attentions = True # model config -> won't work model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, **kwargs, ) self.assertTrue(all(key not in outputs for key in ["encoder_attentions", "decoder_attentions"])) config.text_encoder.output_attentions = True # inner model config -> will work config.audio_encoder.output_attentions = True config.decoder.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, **kwargs, ) self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) # override since changing `output_attentions` from the top-level model config won't work def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.check_musicgen_melody_model_output_attentions(model_class, config, **inputs_dict) self.check_musicgen_melody_model_output_attentions_from_config(model_class, config, **inputs_dict) # override since we have a specific forward signature for musicgen_melody # Ignore copy def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_ids", "attention_mask", "input_features", "decoder_input_ids", "decoder_attention_mask", ] if "head_mask" and "decoder_head_mask" in arg_names: expected_arg_names.extend(["head_mask", "decoder_head_mask"]) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) # override since changing `gradient_checkpointing` from the top-level model config won't work def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.text_encoder.gradient_checkpointing = True config.audio_encoder.gradient_checkpointing = True config.decoder.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.") def test_tie_model_weights(self): pass @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.") def test_tied_model_weights_key_ignore(self): pass @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.") def test_tied_weights_keys(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_checkpoints(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_no_safetensors(self): pass # override since changing `output_hidden_states` / `output_attentions` from the top-level model config won't work # 
Ignore copy def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.text_encoder.output_hidden_states = True config.audio_encoder.output_hidden_states = True config.decoder.output_hidden_states = True config.text_encoder.output_attentions = True config.decoder.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: decoder_attentions = outputs.attentions[0] decoder_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(decoder_attentions.grad) # override since changing `output_hidden_states` from the top-level model config won't work def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) # Ignore copy seq_length = self.model_tester.conditional_seq_length + self.model_tester.chroma_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) # Ignore copy seq_length = self.model_tester.encoder_seq_length + self.model_tester.chroma_length # Ignore copy expected_num_layers = self.model_tester.num_hidden_layers + 1 # Ignore copy hidden_states = outputs.hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.text_encoder.output_hidden_states = True config.audio_encoder.output_hidden_states = True config.decoder.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # override since the conv layers and lstm's in encodec are exceptions def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv"] ignore_init = ["lstm"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif not any(x in name for x in ignore_init): self.assertIn( ((param.data.mean() * 
1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # override since we have embeddings / LM heads over multiple codebooks def test_model_get_set_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), torch.nn.Embedding) lm_heads = model.get_output_embeddings() self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) def _get_logits_processor_kwargs(self, do_sample=False, config=None): logits_processor_kwargs = {} return logits_processor_kwargs @require_torch_fp16 @require_torch_accelerator # not all operations are supported in fp16 on CPU def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.greedy_sample_model_classes: model = model_class(config).eval().to(torch_device) model.half() # greedy model.generate(input_dict["input_ids"], attention_mask=input_dict["attention_mask"], max_new_tokens=10) # sampling model.generate( input_dict["input_ids"], attention_mask=input_dict["attention_mask"], do_sample=True, max_new_tokens=10 ) def test_greedy_generate_stereo_outputs(self): original_audio_channels = self.model_tester.audio_channels self.model_tester.audio_channels = 2 super().test_greedy_generate_dict_outputs() self.model_tester.audio_channels = original_audio_channels @unittest.skip( reason="MusicgenMelodyModel is actually not the base of MusicgenMelodyForCausalLM as the latter is a composit model" ) def test_save_load_fast_init_from_base(self): pass @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow # Adapted from tests.test_modeling_common.ModelTesterMixin.test_flash_attn_2_inference_equivalence def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation={"decoder": "flash_attention_2", "audio_encoder": None, "text_encoder": None}, ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) # Ignore copy dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: # Ignore copy dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 # Ignore copy decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input) # Ignore copy outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) # Ignore copy outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) # Ignore copy other_inputs = { 
"decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } # Ignore copy if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask # Ignore copy outputs = model(dummy_input, **other_inputs) # Ignore copy outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_conversion(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation={"decoder": "flash_attention_2", "audio_encoder": None, "text_encoder": None}, ).to(torch_device) for _, module in model.named_modules(): if "FlashAttention" in module.__class__.__name__: return self.assertTrue(False, "FlashAttention2 modules not found in model") @require_torch_sdpa @require_torch_gpu @slow def test_sdpa_can_dispatch_on_flash(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") torch.compiler.reset() compute_capability = torch.cuda.get_device_capability() major, _ = compute_capability if not torch.version.cuda or major < 8: self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0") for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) if config.model_type in ["llava", "llava_next", "vipllava", "video_llava"]: self.skipTest( reason="Llava-like models currently (transformers==4.39.1) requires an attention_mask input" ) if config.model_type in ["paligemma"]: self.skipTest( "PaliGemma-like models currently (transformers==4.41.0) requires an attention_mask input" ) if config.model_type in ["idefics", "idefics2", "idefics3"]: self.skipTest(reason="Idefics currently (transformers==4.39.1) requires an image_attention_mask input") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation={"decoder": "sdpa", "audio_encoder": None, "text_encoder": None}, ) model.to(torch_device) inputs_dict.pop("attention_mask", None) inputs_dict.pop("decoder_attention_mask", None) for name, inp in inputs_dict.items(): if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]: inputs_dict[name] = inp.to(torch.float16) with sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): _ = model(**inputs_dict) @require_flash_attn 
@require_torch_gpu @mark.flash_attn_test @slow # Adapted from tests.test_modeling_common.ModelTesterMixin.test_flash_attn_2_inference_equivalence_right_padding def test_flash_attn_2_inference_equivalence_right_padding(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation={"decoder": "flash_attention_2", "audio_encoder": None, "text_encoder": None}, ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) # Ignore copy dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: # Ignore copy dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 # Ignore copy decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input) # Ignore copy outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) # Ignore copy outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) # Ignore copy other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } # Ignore copy if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask # Ignore copy outputs = model(dummy_input, **other_inputs) # Ignore copy outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_torch_sdpa def test_sdpa_can_dispatch_composite_models(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) audio_encoder_attn = "sdpa" if model.audio_encoder._supports_sdpa else "eager" text_encoder_attn = "sdpa" if model.text_encoder._supports_sdpa else "eager" decoder_attn = "sdpa" if model.decoder._supports_sdpa else "eager" # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) 
self.assertTrue(model_sdpa.audio_encoder.config._attn_implementation == audio_encoder_attn) self.assertTrue(model_sdpa.text_encoder.config._attn_implementation == text_encoder_attn) self.assertTrue(model_sdpa.decoder.config._attn_implementation == decoder_attn) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.audio_encoder.config._attn_implementation == "eager") self.assertTrue(model_eager.text_encoder.config._attn_implementation == "eager") self.assertTrue(model_eager.decoder.config._attn_implementation == "eager") self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: has_sdpa = True break if not has_sdpa and model_sdpa.config.model_type != "falcon": raise ValueError("The SDPA model should have SDPA attention layers") @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa # Copied from tests.test_modeling_common.ModelTesterMixin.test_eager_matches_sdpa_inference def test_eager_matches_sdpa_inference(self, torch_dtype: str): if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_dtype == "float16" and not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if torch_dtype == "bfloat16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Not sure whether it's fine to put torch.XXX in a decorator if torch is not available so hacking it here instead. 
if torch_dtype == "float16": torch_dtype = torch.float16 elif torch_dtype == "bfloat16": torch_dtype = torch.bfloat16 elif torch_dtype == "float32": torch_dtype = torch.float32 atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, ("cuda", True, torch.float16): 5e-3, } def get_mean_reldiff(failcase, x, ref, atol, rtol): return f"{failcase}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" set_model_tester_for_less_flaky_test(self) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) model = model_class(config) is_encoder_decoder = model.config.is_encoder_decoder with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device) model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) set_model_for_less_flaky_test(model_eager) set_model_for_less_flaky_test(model_sdpa) # We use these for loops instead of parameterized.expand just for the interest of avoiding loading/saving 8 times the model, # but it would be nicer to have an efficient way to use parameterized.expand fail_cases = [] for padding_side in ["left", "right"]: for use_mask in [False, True]: for batch_size in [7]: dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: dummy_input = dummy_input.to(torch_dtype) dummy_input = dummy_input[:batch_size] if dummy_input.shape[0] != batch_size: if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: extension = torch.rand( batch_size - dummy_input.shape[0], *dummy_input.shape[1:], dtype=torch_dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) else: extension = torch.randint( high=5, size=(batch_size - dummy_input.shape[0], *dummy_input.shape[1:]), dtype=dummy_input.dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) if not use_mask: dummy_attention_mask = None else: dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is None: # Ignore copy seqlen = inputs_dict.get("decoder_input_ids", dummy_input).shape[-1] # Ignore copy dummy_attention_mask = ( torch.ones(batch_size, seqlen).to(torch.int64).to(torch_device) ) dummy_attention_mask = dummy_attention_mask[:batch_size] if dummy_attention_mask.shape[0] != batch_size: extension = torch.ones( 
batch_size - dummy_attention_mask.shape[0], *dummy_attention_mask.shape[1:], dtype=dummy_attention_mask.dtype, device=torch_device, ) dummy_attention_mask = torch.cat((dummy_attention_mask, extension), dim=0) dummy_attention_mask = dummy_attention_mask.to(torch_device) dummy_attention_mask[:] = 1 if padding_side == "left": dummy_attention_mask[-1, :2] = 0 dummy_attention_mask[-1, 2:] = 1 elif padding_side == "right": dummy_attention_mask[-1, -2:] = 0 dummy_attention_mask[-1, :-2] = 1 for enable_kernels in [False, True]: failcase = f"padding_side={padding_side}, use_mask={use_mask}, batch_size={batch_size}, enable_kernels={enable_kernels}" # Ignore copy batch_size_input_ids = self.model_tester.num_codebooks * batch_size # Ignore copy decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[ :batch_size_input_ids ] # Ignore copy if decoder_input_ids.shape[0] != batch_size_input_ids: # Ignore copy extension = torch.ones( batch_size_input_ids - decoder_input_ids.shape[0], *decoder_input_ids.shape[1:], dtype=decoder_input_ids.dtype, device=torch_device, ) decoder_input_ids = torch.cat((decoder_input_ids, extension), dim=0) decoder_input_ids = decoder_input_ids.to(torch_device) # TODO: never an `attention_mask` arg here? # Ignore copy other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } # TODO: test gradients as well (& for FA2 as well!) # Ignore copy with torch.no_grad(): with sdpa_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): outputs_eager = model_eager(dummy_input, **other_inputs) outputs_sdpa = model_sdpa(dummy_input, **other_inputs) logits_eager = ( outputs_eager.hidden_states[-1] if not is_encoder_decoder else outputs_eager.decoder_hidden_states[-1] ) logits_sdpa = ( outputs_sdpa.hidden_states[-1] if not is_encoder_decoder else outputs_sdpa.decoder_hidden_states[-1] ) if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, torch_dtype] rtol = rtols[torch_device, enable_kernels, torch_dtype] elif torch_device == "xpu": # As of PyTorch 2.5 XPU backend supports only torch.nn.attention.SDPBackend.MATH # which is implemented on PyTorch level using aten operators and is # device agnostic with respect to implementation of each aten operator. atol = atols["cuda", False, torch_dtype] rtol = rtols["cuda", False, torch_dtype] else: atol = 1e-7 rtol = 1e-4 # Masked tokens output slightly deviates - we don't mind that. 
if use_mask: _logits_sdpa = torch.zeros_like(input=logits_sdpa) _logits_eager = torch.zeros_like(input=logits_eager) _logits_sdpa[:-1] = logits_sdpa[:-1] _logits_eager[:-1] = logits_eager[:-1] if padding_side == "left": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, 2:] _logits_eager[-1:, 2:] = logits_eager[-1:, 2:] elif padding_side == "right": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, :-2] _logits_eager[-1:, 2:] = logits_eager[-1:, :-2] logits_sdpa = _logits_sdpa logits_eager = _logits_eager results = [ torch.allclose(_logits_sdpa, _logits_eager, atol=atol, rtol=rtol) for (_logits_sdpa, _logits_eager) in zip(logits_sdpa, logits_eager) ] # If 80% batch elements have matched results, it's fine if np.mean(results) < 0.8: fail_cases.append( get_mean_reldiff(failcase, logits_sdpa, logits_eager, atol, rtol) ) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) def test_requires_grad_with_frozen_encoders(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.freeze_audio_encoder() audio_encoder_grads = [param.requires_grad for param in model.audio_encoder.parameters()] text_encoder_grads = [param.requires_grad for param in model.text_encoder.parameters()] self.assertFalse(all(audio_encoder_grads)) self.assertTrue(all(text_encoder_grads)) model = model_class(config) model.freeze_text_encoder() audio_encoder_grads = [param.requires_grad for param in model.audio_encoder.parameters()] text_encoder_grads = [param.requires_grad for param in model.text_encoder.parameters()] self.assertTrue(all(audio_encoder_grads)) self.assertFalse(all(text_encoder_grads)) # Copied from tests.models.musicgen.test_modeling_musicgen.get_bip_bip def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000): """Produces a series of 'bip bip' sounds at a given frequency.""" timesteps = np.arange(int(duration * sample_rate)) / sample_rate wav = np.cos(2 * math.pi * 440 * timesteps) time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration) envelope = time_period >= 0.5 return wav * envelope @require_torch @require_torchaudio class MusicgenMelodyIntegrationTests(unittest.TestCase): @cached_property def model(self): return MusicgenMelodyForConditionalGeneration.from_pretrained("ylacombe/musicgen-melody").to(torch_device) @cached_property def processor(self): return MusicgenMelodyProcessor.from_pretrained("ylacombe/musicgen-melody") @slow def test_logits_text_prompt(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) # prepare the decoder inputs pad_token_id = model.generation_config.pad_token_id decoder_input_ids = ( torch.ones((input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long).to(torch_device) * pad_token_id ) with torch.no_grad(): logits = model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, ).logits # fmt: off EXPECTED_LOGITS = torch.tensor([ 1.1100, -2.1065, -3.7699, -0.7102, 1.3707, -1.7028, -2.6802, -6.0367, 1.0504, -2.5358, -4.3497, 0.7338, 0.4823, -2.5260, 1.2717, 1.5427 ]) # fmt: on EXPECTED_OUTPUT_LENGTH = input_ids.shape[1] + 1 + self.model.config.chroma_length logits_shape = ( input_ids.shape[0] * model.decoder.num_codebooks, EXPECTED_OUTPUT_LENGTH, model.decoder.config.vocab_size, ) self.assertTrue(logits.shape == logits_shape) 
torch.testing.assert_close(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @slow def test_logits_text_audio_prompt(self): model = self.model processor = self.processor audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") # prepare the text encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) # prepare the audio encoder inputs input_features = inputs.input_features.to(torch_device) # prepare the decoder inputs pad_token_id = model.generation_config.pad_token_id decoder_input_ids = ( torch.ones((input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long).to(torch_device) * pad_token_id ) with torch.no_grad(): logits = model( input_ids, attention_mask=attention_mask, input_features=input_features, decoder_input_ids=decoder_input_ids, ).logits # fmt: off EXPECTED_LOGITS = torch.tensor([ [ 0.7479, 0.3742, 0.6253, -7.9405, 0.7105, -6.9995, 0.7792, -3.0482], [-2.7905, 0.7492, -0.2556, -8.1586, -1.6740, 0.5771, -8.3650, -0.0908] ]) # fmt: on self.assertTrue(logits.shape == (8, 240, 2048)) torch.testing.assert_close(logits[1:3, -1, 32:40].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @slow def test_generate_unconditional_greedy(self): model = self.model # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same unconditional_inputs = self.processor.get_unconditional_inputs(num_samples=1).to(torch_device) output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=10, guidance_scale=1.0) # fmt: off EXPECTED_VALUES = torch.tensor( [ 1.2741e-04, -8.0466e-05, 5.5789e-04, 1.0402e-03, 2.6547e-04, 1.5587e-05, -1.4210e-04, -9.7303e-05, 6.4504e-04, 5.0903e-04, 9.6474e-04, 1.0498e-03, 3.7210e-05, -5.3652e-04, -3.6579e-04, -2.5678e-04 ] ) # fmt: on self.assertTrue(output_values.shape == (1, 1, 4480)) torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4) @slow def test_generate_unconditional_sampling(self): model = self.model # for stochastic sampling we can generate multiple outputs unconditional_inputs = self.processor.get_unconditional_inputs(num_samples=2).to(torch_device) set_seed(0) output_values = model.generate( **unconditional_inputs, do_sample=True, max_new_tokens=10, guidance_scale=1.0, temperature=1.0, top_k=250 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ -0.0085, -0.0160, 0.0028, 0.0005, -0.0095, 0.0028, -0.0122, -0.0299, -0.0052, -0.0145, 0.0092, 0.0063, -0.0378, -0.0621, -0.0784, -0.0120, ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4) @slow def test_generate_text_prompt_greedy(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=None, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ 1.2741e-04, -8.0474e-05, 5.5789e-04, 1.0402e-03, 2.6547e-04, 1.5597e-05, -1.4210e-04, -9.7309e-05, 6.4504e-04, 5.0903e-04 ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) torch.testing.assert_close(output_values[0, 
0, :10].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4) @slow def test_generate_text_prompt_greedy_with_classifier_free_guidance(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=3, max_new_tokens=10 ) # fmt: off EXPECTED_VALUES = torch.tensor( [ 1.2741e-04, -8.0474e-05, 5.5789e-04, 1.0402e-03, 2.6547e-04, 1.5597e-05, -1.4210e-04, -9.7309e-05, 6.4504e-04, 5.0903e-04, 9.6475e-04, 1.0499e-03, 3.7215e-05, -5.3651e-04, -3.6578e-04, -2.5678e-04 ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4) @slow def test_generate_text_prompt_sampling(self): model = self.model processor = self.processor inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") # prepare the encoder inputs input_ids = inputs.input_ids.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) set_seed(0) output_values = model.generate( input_ids, attention_mask=attention_mask, do_sample=True, guidance_scale=None, max_new_tokens=10, temperature=1.0, top_k=250, ) # fmt: off EXPECTED_VALUES = torch.tensor( [ -0.0165, -0.0222, -0.0041, -0.0058, -0.0145, -0.0023, -0.0160, -0.0310, -0.0055, -0.0127, 0.0104, 0.0105, -0.0326, -0.0611, -0.0744, -0.0083 ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4) @slow def test_generate_text_audio_prompt(self): model = self.model processor = self.processor audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt").to(torch_device) output_values = model.generate(**inputs, do_sample=False, guidance_scale=None, max_new_tokens=10) # fmt: off EXPECTED_VALUES = torch.tensor( [ -1.1999e-04, -2.2303e-04, 4.6296e-04, 1.0524e-03, 2.4827e-04, -4.0294e-05, -1.2468e-04, 4.9846e-05, 7.1484e-04, 4.4198e-04, 7.9063e-04, 8.8141e-04, -6.1807e-05, -6.1856e-04, -3.6235e-04, -2.7226e-04 ] ) # fmt: on self.assertTrue(output_values.shape == (2, 1, 4480)) torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4) @require_torch @require_torchaudio class MusicgenMelodyStereoIntegrationTests(unittest.TestCase): @cached_property def model(self): return MusicgenMelodyForConditionalGeneration.from_pretrained("ylacombe/musicgen-stereo-melody").to( torch_device ) @cached_property def processor(self): return MusicgenMelodyProcessor.from_pretrained("ylacombe/musicgen-stereo-melody") @slow def test_generate_unconditional_greedy(self): model = self.model # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same unconditional_inputs = self.processor.get_unconditional_inputs(num_samples=1).to(torch_device) output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=12, guidance_scale=1.0) # fmt: off EXPECTED_VALUES_LEFT = torch.tensor( [ 1.2742e-04, -8.0480e-05, 5.5788e-04, 1.0401e-03, 2.6547e-04, 1.5587e-05, -1.4211e-04, -9.7308e-05, 6.4503e-04, 5.0903e-04, 9.6475e-04, 1.0499e-03, 3.7205e-05, -5.3652e-04, 
-3.6579e-04, 2.5679e-04 ] ) # fmt: on # (bsz, channels, seq_len) self.assertTrue(output_values.shape == (1, 2, 5760)) torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=6e-4, atol=6e-4) torch.testing.assert_close(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=6e-4, atol=6e-4) @slow def test_generate_text_audio_prompt(self): model = self.model processor = self.processor audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt").to(torch_device) output_values = model.generate(**inputs, do_sample=False, guidance_scale=3.0, max_new_tokens=12) # fmt: off EXPECTED_VALUES_LEFT_FIRST_SAMPLE = torch.tensor( [ -0.0862, -0.1021, -0.0936, -0.0754, -0.0616, -0.0456, -0.0354, -0.0298, -0.0036, 0.0222, 0.0523, 0.0660, 0.0496, 0.0356, 0.0457, 0.0769 ] ) EXPECTED_VALUES_RIGHT_SECOND_SAMPLE = torch.tensor( [ -0.0327, -0.0450, -0.0264, -0.0278, -0.0365, -0.0272, -0.0401, -0.0574, -0.0413, -0.0508, -0.0269, -0.0323, -0.0762, -0.1115, -0.1390, -0.0790 ] ) # fmt: on # (bsz, channels, seq_len) self.assertTrue(output_values.shape == (2, 2, 5760)) torch.testing.assert_close( output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT_FIRST_SAMPLE, rtol=1e-4, atol=1e-4 ) torch.testing.assert_close( output_values[1, 1, :16].cpu(), EXPECTED_VALUES_RIGHT_SECOND_SAMPLE, rtol=1e-4, atol=1e-4 )
transformers/tests/models/musicgen_melody/test_modeling_musicgen_melody.py/0
{ "file_path": "transformers/tests/models/musicgen_melody/test_modeling_musicgen_melody.py", "repo_id": "transformers", "token_count": 48196 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import GemmaTokenizer, PaliGemmaProcessor from transformers.testing_utils import get_tests_dir, require_torch, require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import SiglipImageProcessor SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_vision class PaliGemmaProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = PaliGemmaProcessor def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = SiglipImageProcessor.from_pretrained("google/siglip-so400m-patch14-384") image_processor.image_seq_length = 0 tokenizer = GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True) processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(self.tmpdirname) def tearDown(self): shutil.rmtree(self.tmpdirname) @require_torch @require_vision def test_image_seq_length(self): input_str = "lower newer" image_input = self.prepare_image_inputs() image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length") image_processor.image_seq_length = 14 processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) inputs = processor( text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length" ) self.assertEqual(len(inputs["input_ids"][0]), 112 + 14) def test_text_with_image_tokens(self): image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) text_multi_images = "<image><image>Dummy text!" text_single_image = "<image>Dummy text!" text_no_image = "Dummy text!" 
image = self.prepare_image_inputs() out_noimage = processor(text=text_no_image, images=image, return_tensors="np") out_singlimage = processor(text=text_single_image, images=image, return_tensors="np") for k in out_noimage: self.assertTrue(out_noimage[k].tolist() == out_singlimage[k].tolist()) out_multiimages = processor(text=text_multi_images, images=[image, image], return_tensors="np") out_noimage = processor(text=text_no_image, images=[[image, image]], return_tensors="np") # We can't be sure what is users intention, whether user want "one text + two images" or user forgot to add the second text with self.assertRaises(ValueError): out_noimage = processor(text=text_no_image, images=[image, image], return_tensors="np") for k in out_noimage: self.assertTrue(out_noimage[k].tolist() == out_multiimages[k].tolist()) text_batched = ["Dummy text!", "Dummy text!"] text_batched_with_image = ["<image>Dummy text!", "<image>Dummy text!"] out_images = processor(text=text_batched_with_image, images=[image, image], return_tensors="np") out_noimage_nested = processor(text=text_batched, images=[[image], [image]], return_tensors="np") out_noimage = processor(text=text_batched, images=[image, image], return_tensors="np") for k in out_noimage: self.assertTrue(out_noimage[k].tolist() == out_images[k].tolist() == out_noimage_nested[k].tolist())
transformers/tests/models/paligemma/test_processor_paligemma.py/0
{ "file_path": "transformers/tests/models/paligemma/test_processor_paligemma.py", "repo_id": "transformers", "token_count": 1576 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "deepmind/language-perceiver" tokenizer_class = PerceiverTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() tokenizer = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def perceiver_tokenizer(self): return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver") def get_tokenizer(self, **kwargs) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. toks = [] for i in range(len(tokenizer)): try: tok = tokenizer.decode([i], clean_up_tokenization_spaces=False) except UnicodeDecodeError: pass toks.append((i, tok)) toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def test_multibytes_char(self): tokenizer = self.perceiver_tokenizer src_text = "Unicode €." 
encoded = tokenizer(src_text) encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]Unicode €.[SEP]") encoded = tokenizer("e è é ê ë") encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]") # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]") def test_prepare_batch_integration(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 38), batch.input_ids.shape) self.assertEqual((2, 38), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length_integration(self): tokenizer = self.perceiver_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) # cannot use default save_and_load_tokenizer test method because tokenizer has no vocab def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" tokenizer.add_tokens(["bim", "bambam"]) 
additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens # We need to add the extra_ids in the list of the arg additional_special_tokens def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_decode_invalid_byte_id(self): tokenizer = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178]), "�") @unittest.skip(reason="tokenizer does not have vocabulary") def test_get_vocab(self): pass @unittest.skip(reason="inputs cannot be pretokenized") def test_pretokenized_inputs(self): # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters pass @unittest.skip(reason="vocab does not exist") def test_conversion_reversible(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str)
transformers/tests/models/perceiver/test_tokenization_perceiver.py/0
{ "file_path": "transformers/tests/models/perceiver/test_tokenization_perceiver.py", "repo_id": "transformers", "token_count": 6120 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest import numpy as np import requests from packaging import version from transformers.testing_utils import ( require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PixtralImageProcessor if is_torchvision_available(): from transformers import PixtralImageProcessorFast class PixtralImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, max_num_images_per_sample=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, patch_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"longest_edge": 24} patch_size = patch_size if patch_size is not None else {"height": 8, "width": 8} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.max_num_images_per_sample = max_num_images_per_sample self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.patch_size = patch_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "patch_size": self.patch_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): if not isinstance(images, (list, tuple)): images = [images] batch_size = len(images) return_height, return_width = 0, 0 for image in images: if isinstance(image, Image.Image): width, height = image.size elif isinstance(image, np.ndarray): height, width = image.shape[:2] elif isinstance(image, torch.Tensor): height, width = image.shape[-2:] max_height = max_width = self.size.get("longest_edge") ratio = max(height / max_height, width / max_width) if ratio > 1: height = int(np.ceil(height / ratio)) width = int(np.ceil(width / ratio)) patch_height, patch_width = self.patch_size["height"], self.patch_size["width"] num_height_tokens = (height - 1) // patch_height + 1 num_width_tokens = (width - 1) // patch_width + 1 return_height = max(num_height_tokens * patch_height, return_height) return_width = max(num_width_tokens * patch_width, return_width) return batch_size, self.num_channels, return_height, return_width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, 
num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) return images @require_torch @require_vision class PixtralImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = PixtralImageProcessor if is_vision_available() else None fast_image_processing_class = PixtralImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = PixtralImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "patch_size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) # The following tests are overriden as PixtralImageProcessor can return images of different sizes # and thus doesn't support returning batched tensors def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs_list = self.image_processor_tester.prepare_image_inputs() for image in image_inputs_list: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs_list = self.image_processor_tester.prepare_image_inputs(numpify=True) for image in image_inputs_list: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched batch_encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(batch_encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = 
image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs_list = self.image_processor_tester.prepare_image_inputs(torchify=True) for image in image_inputs_list: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs_list[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list[0]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched batch_encoded_images = image_processing(image_inputs_list, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs_list) self.assertEqual(tuple(batch_encoded_images.shape), expected_output_image_shape) @require_vision @require_torch def test_fast_is_faster_than_slow(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping speed test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping speed test as one of the image processors is not defined") def measure_time(image_processor, image): start = time.time() _ = image_processor(image, return_tensors="pt") return time.time() - start image_inputs_list = self.image_processor_tester.prepare_image_inputs(torchify=True) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) fast_time = measure_time(image_processor_fast, image_inputs_list) slow_time = measure_time(image_processor_slow, image_inputs_list) self.assertLessEqual(fast_time, slow_time) @require_vision @require_torch def test_slow_fast_equivalence(self): dummy_image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") torch.testing.assert_close( encoding_slow.pixel_values[0][0], encoding_fast.pixel_values[0][0], rtol=100, atol=1e-1 ) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = 
self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, return_tensors="pt") for i in range(len(encoding_slow.pixel_values)): self.assertTrue( torch.allclose(encoding_slow.pixel_values[i][0], encoding_fast.pixel_values[i][0], atol=1e-1) ) self.assertLessEqual( torch.mean(torch.abs(encoding_slow.pixel_values[i][0] - encoding_fast.pixel_values[i][0])).item(), 1e-3 ) torch.testing.assert_close( encoding_slow.pixel_values[0][0], encoding_fast.pixel_values[0][0], rtol=100, atol=1e-1 ) @slow @require_torch_gpu @require_vision def test_can_compile_fast_image_processor(self): if self.fast_image_processing_class is None: self.skipTest("Skipping compilation test as fast image processor is not defined") if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") torch.compiler.reset() input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8) image_processor = self.fast_image_processing_class(**self.image_processor_dict) output_eager = image_processor(input_image, device=torch_device, return_tensors="pt") image_processor = torch.compile(image_processor, mode="reduce-overhead") output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt") torch.testing.assert_close( output_eager.pixel_values[0][0], output_compiled.pixel_values[0][0], rtol=1e-4, atol=1e-4 ) @unittest.skip(reason="PixtralImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") # FIXME Amy def test_call_numpy_4_channels(self): pass
transformers/tests/models/pixtral/test_image_processing_pixtral.py/0
{ "file_path": "transformers/tests/models/pixtral/test_image_processing_pixtral.py", "repo_id": "transformers", "token_count": 6025 }
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "microsoft/prophetnet-large-uncased" tokenizer_class = ProphetNetTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00e9d,running" output_text = "unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("UNwant\u00e9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11]) def test_chinese(self): tokenizer = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535a\u63a8zz"), ["ah", "\u535a", "\u63a8", "zz"]) def test_basic_tokenizer_lower(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["h\u00e9llo"]) def test_basic_tokenizer_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_default(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
"), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"]) def test_basic_tokenizer_no_lower(self): tokenizer = BasicTokenizer(do_lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_respects_never_split_tokens(self): tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) @require_torch def test_prepare_batch(self): tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) result = list(batch.input_ids.numpy()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) def test_is_whitespace(self): self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00a0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def test_is_control(self): self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def test_is_punctuation(self): self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_2 + [102]
transformers/tests/models/prophetnet/test_tokenization_prophetnet.py/0
{ "file_path": "transformers/tests/models/prophetnet/test_tokenization_prophetnet.py", "repo_id": "transformers", "token_count": 3468 }
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "FacebookAI/roberta-base" tokenizer_class = RobertaTokenizer rust_tokenizer_class = RobertaTokenizerFast test_rust_tokenizer = True from_pretrained_kwargs = {"cls_token": "<s>"} def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "lower newer" bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text) # , add_prefix_space=True) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def roberta_dict_integration_testing(self): tokenizer = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2]) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418", add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], ) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("FacebookAI/roberta-base") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_text_from_decode = tokenizer.encode( "sequence builders", add_special_tokens=True, add_prefix_space=False ) encoded_pair_from_decode = tokenizer.encode( "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def test_space_encoding(self): tokenizer = self.get_tokenizer() sequence = "Encode this sequence." space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]] # Testing encoder arguments encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertNotEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertEqual(first_char, space_encoding) tokenizer.add_special_tokens({"bos_token": "<s>"}) encoded = tokenizer.encode(sequence, add_special_tokens=True) first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0] self.assertNotEqual(first_char, space_encoding) # Testing spaces after special tokens mask = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)} ) # mask token has a left space mask_ind = tokenizer.convert_tokens_to_ids(mask) sequence = "Encode <mask> sequence" sequence_nospace = "Encode <mask>sequence" encoded = tokenizer.encode(sequence) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence_nospace) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertNotEqual(first_char, space_encoding) @unittest.skip def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." 
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def test_change_add_prefix_space_and_trim_offsets_args(self): for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2): tokenizer_r = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets ) pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__()) post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__()) self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space) self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space) self.assertEqual(post_processor_state["trim_offsets"], trim_offsets) def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): text_of_1_token = "hello" # `hello` is a token in the vocabulary of `pretrained_name` text = f"{text_of_1_token} {text_of_1_token}" tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], 
(len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), ) text = f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
transformers/tests/models/roberta/test_tokenization_roberta.py/0
{ "file_path": "transformers/tests/models/roberta/test_tokenization_roberta.py", "repo_id": "transformers", "token_count": 7044 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import RTDetrResNetConfig from transformers.testing_utils import require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): from transformers import RTDetrResNetBackbone class RTDetrResNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return RTDetrResNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_backbone(self, config, pixel_values, labels): model = RTDetrResNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, config.hidden_sizes[1:]) # verify backbone works with out_features=None config.out_features = None model = RTDetrResNetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs 
inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class RTDetrResNetBackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (RTDetrResNetBackbone,) if is_torch_available() else () has_attentions = False config_class = RTDetrResNetConfig def setUp(self): self.model_tester = RTDetrResNetModelTester(self)
transformers/tests/models/rt_detr/test_modeling_rt_detr_resnet.py/0
{ "file_path": "transformers/tests/models/rt_detr/test_modeling_rt_detr_resnet.py", "repo_id": "transformers", "token_count": 1914 }
# coding=utf-8 # Copyright 2022 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow, torch_device from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bart.test_modeling_flax_bart import FlaxBartStandaloneDecoderModelTester from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..gpt2.test_modeling_flax_gpt2 import FlaxGPT2ModelTester from ..wav2vec2.test_modeling_flax_wav2vec2 import FlaxWav2Vec2ModelTester if is_flax_available(): import jax import jax.numpy as jnp from flax.training.common_utils import onehot from flax.traverse_util import flatten_dict from transformers import ( FlaxBartForCausalLM, FlaxBertForCausalLM, FlaxGPT2LMHeadModel, FlaxSpeechEncoderDecoderModel, FlaxWav2Vec2Model, SpeechEncoderDecoderConfig, ) from transformers.modeling_flax_outputs import FlaxBaseModelOutput from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import SpeechEncoderDecoderModel @require_flax class FlaxEncoderDecoderMixin: def get_encoder_decoder_model(self, config, decoder_config): raise NotImplementedError def prepare_config_and_inputs(self): raise NotImplementedError def get_pretrained_model(self): raise NotImplementedError def check_encoder_decoder_model_from_pretrained_configs( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) self.assertTrue(encoder_decoder_config.decoder.is_decoder) enc_dec_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) self.assertTrue(enc_dec_model.config.is_encoder_decoder) self.assertFalse(enc_dec_model.config.tie_word_embeddings) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) self.assertTrue(enc_dec_model.config.decoder.is_decoder) self.assertTrue(enc_dec_model.config.decoder.add_cross_attention) self.assertTrue(enc_dec_model.config.is_encoder_decoder) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, 
decoder_attention_mask=decoder_attention_mask, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) encoder_outputs = FlaxBaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1]) outputs_encoder_decoder = enc_dec_model( attention_mask, decoder_input_ids, decoder_attention_mask, encoder_outputs=encoder_outputs ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_from_pretrained( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, return_dict, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_save_and_load( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: enc_dec_model.save_pretrained(tmpdirname) FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname) after_outputs = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 4e-2) def check_encoder_decoder_model_from_encoder_decoder_pretrained( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) # assert that loading encoder and decoder models from configs has been correctly executed self.assertEqual(config.add_adapter, encoder_model.config.add_adapter) self.assertEqual(decoder_config.use_cache, decoder_model.config.use_cache) with tempfile.TemporaryDirectory() as enc_tmpdir: with tempfile.TemporaryDirectory() as dec_tmpdir: encoder_model.save_pretrained(enc_tmpdir) decoder_model.save_pretrained(dec_tmpdir) # load a model from pretrained encoder and decoder checkpoints, setting one encoder and one decoder kwarg opposite to that specified in their respective configs enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=enc_tmpdir, decoder_pretrained_model_name_or_path=dec_tmpdir, encoder_add_adapter=not config.add_adapter, decoder_use_cache=not decoder_config.use_cache, ) # assert 
that setting encoder and decoder kwargs opposite to those in the configs has correctly been applied self.assertNotEqual(config.add_adapter, enc_dec_model.config.encoder.add_adapter) self.assertNotEqual(decoder_config.use_cache, enc_dec_model.config.decoder.use_cache) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_hidden_states=True, return_dict=True, ) self.assertEqual( outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)) ) def check_encoder_decoder_model_output_attentions( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): # make the decoder inputs a different shape from the encoder inputs to harden the test decoder_input_ids = decoder_input_ids[:, :-1] decoder_attention_mask = decoder_attention_mask[:, :-1] encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) outputs_encoder_decoder = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=True, ) encoder_attentions = outputs_encoder_decoder["encoder_attentions"] self.assertEqual(len(encoder_attentions), config.num_hidden_layers) seq_len = enc_dec_model._get_feat_extract_output_lengths(inputs.shape[1]) self.assertEqual(encoder_attentions[0].shape[-3:], (config.num_attention_heads, seq_len, seq_len)) decoder_attentions = outputs_encoder_decoder["decoder_attentions"] num_decoder_layers = ( decoder_config.num_decoder_layers if hasattr(decoder_config, "num_decoder_layers") else decoder_config.num_hidden_layers ) self.assertEqual(len(decoder_attentions), num_decoder_layers) self.assertEqual( decoder_attentions[0].shape[-3:], (decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]), ) cross_attentions = outputs_encoder_decoder["cross_attentions"] self.assertEqual(len(cross_attentions), num_decoder_layers) cross_attention_input_seq_len = decoder_input_ids.shape[-1] self.assertEqual( cross_attentions[0].shape[-3:], (decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), ) def check_encoder_decoder_model_generate(self, inputs, config, decoder_config, **kwargs): encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config) kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model} enc_dec_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs) pad_token_id = enc_dec_model.config.decoder.pad_token_id eos_token_id = enc_dec_model.config.decoder.eos_token_id decoder_start_token_id = enc_dec_model.config.decoder.decoder_start_token_id # Copied from generation.utils (GPT2 doesn't have `pad_token_id`) if pad_token_id is None and eos_token_id is not None: pad_token_id = eos_token_id if decoder_start_token_id is None: decoder_start_token_id = enc_dec_model.config.decoder.bos_token_id # Bert does not have a bos token id, so use pad_token_id instead # Copied from `test_modeling_encoder_decoder.py` if decoder_start_token_id is None: decoder_start_token_id = pad_token_id generated_output = enc_dec_model.generate( inputs, pad_token_id=pad_token_id, eos_token_id=eos_token_id, 
decoder_start_token_id=decoder_start_token_id, ) generated_sequences = generated_output.sequences self.assertEqual(generated_sequences.shape, (inputs.shape[0],) + (decoder_config.max_length,)) def check_freeze_feature_encoder( self, config, inputs, attention_mask, encoder_hidden_states, decoder_config, decoder_input_ids, decoder_attention_mask, **kwargs, ): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) enc_dec_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) params = enc_dec_model.params def cross_entropy(logits, labels): return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1) # define a dummy loss function for computing the loss over a forward pass def compute_loss( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder: bool = False, ): outputs_enc_dec = enc_dec_model( inputs=inputs, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, freeze_feature_encoder=freeze_feature_encoder, params=params, ) logits = outputs_enc_dec.logits vocab_size = logits.shape[-1] loss = cross_entropy(logits, onehot(labels=decoder_input_ids, num_classes=vocab_size)).sum() return (loss, logits) # transform the loss function to get the gradients grad_fn = jax.value_and_grad(compute_loss, has_aux=True) # compute the loss, logits, and gradients for the unfrozen model (loss, logits), grads = grad_fn( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder=False ) # compare to the loss, logits and gradients for the frozen model (loss_frozen, logits_frozen), grads_frozen = grad_fn( params, inputs, attention_mask, decoder_input_ids, freeze_feature_encoder=True ) # ensure that the logits and losses remain precisely equal self.assertTrue((logits == logits_frozen).all()) self.assertEqual(loss, loss_frozen) grads = flatten_dict(grads) grads_frozen = flatten_dict(grads_frozen) # ensure that the dicts of gradients contain the same keys self.assertEqual(grads.keys(), grads_frozen.keys()) # ensure that the gradients of the feature extractor layers are precisely zero when frozen and contain non-zero entries when unfrozen feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k) feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k) for feature_extractor_grad, feature_extractor_grad_frozen in zip( feature_extractor_grads, feature_extractor_grads_frozen ): self.assertTrue((feature_extractor_grad_frozen == 0.0).all()) self.assertTrue((feature_extractor_grad > 0.0).any()) # ensure that the gradients of all unfrozen layers remain precisely equal, i.e. 
all layers excluding the frozen 'feature_extractor' grads = tuple(grads[k] for k in grads if "feature_extractor" not in k) grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k) for grad, grad_frozen in zip(grads, grads_frozen): self.assertTrue((grad == grad_frozen).all()) def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict): pt_model.to(torch_device) pt_model.eval() # prepare inputs flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()).to(torch_device) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs, pt_outputs): self.assert_almost_equals(fx_output, pt_output.numpy(force=True), 1e-5) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(force=True), 1e-5) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs, pt_outputs_loaded): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(force=True), 1e-5) def check_equivalence_pt_to_flax(self, config, decoder_config, inputs_dict): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = SpeechEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def check_equivalence_flax_to_pt(self, config, decoder_config, inputs_dict): encoder_decoder_config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config) pt_model = SpeechEncoderDecoderModel(encoder_decoder_config) fx_model = FlaxSpeechEncoderDecoderModel(encoder_decoder_config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict) def test_encoder_decoder_model_from_pretrained_configs(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained_configs(**input_ids_dict) def test_encoder_decoder_model_from_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=False) def test_encoder_decoder_model_from_pretrained_return_dict(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_pretrained(**input_ids_dict, return_dict=True) def test_save_and_load_from_pretrained(self): 
input_ids_dict = self.prepare_config_and_inputs() self.check_save_and_load(**input_ids_dict) def test_encoder_decoder_model_from_encoder_decoder_pretrained(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_from_encoder_decoder_pretrained(**input_ids_dict) def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) def test_freeze_feature_encoder(self): input_ids_dict = self.prepare_config_and_inputs() self.check_freeze_feature_encoder(**input_ids_dict) def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() config = config_inputs_dict.pop("config") decoder_config = config_inputs_dict.pop("decoder_config") inputs_dict = config_inputs_dict # `encoder_hidden_states` is not used in model call/forward del inputs_dict["encoder_hidden_states"] # Avoid the case where a sequence has no place to attend (after combined with the causal attention mask) batch_size = inputs_dict["decoder_attention_mask"].shape[0] inputs_dict["decoder_attention_mask"] = np.concatenate( [np.ones(shape=(batch_size, 1)), inputs_dict["decoder_attention_mask"][:, 1:]], axis=1 ) # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. decoder_config.use_cache = False self.assertTrue(decoder_config.cross_attention_hidden_size is None) # check without `enc_to_dec_proj` projection decoder_config.hidden_size = config.hidden_size self.assertTrue(config.hidden_size == decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) # check `enc_to_dec_proj` work as expected decoder_config.hidden_size = decoder_config.hidden_size * 2 self.assertTrue(config.hidden_size != decoder_config.hidden_size) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) # check `add_adapter` works as expected config.add_adapter = True self.assertTrue(config.add_adapter) self.check_equivalence_pt_to_flax(config, decoder_config, inputs_dict) self.check_equivalence_flax_to_pt(config, decoder_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2 = self.get_pretrained_model() inputs = ids_tensor([13, 5], model_2.config.encoder.vocab_size) decoder_input_ids = ids_tensor([13, 1], model_2.config.decoder.vocab_size) attention_mask = ids_tensor([13, 5], vocab_size=2) outputs = model_2( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_2 = np.array(outputs[0]) out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = FlaxSpeechEncoderDecoderModel.from_pretrained(tmp_dirname) after_outputs = model_1( inputs=inputs, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) out_1 = np.array(after_outputs[0]) out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) 
self.assertLessEqual(max_diff, 4e-2) @require_flax class FlaxWav2Vec2GPT2ModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "openai-community/gpt2-medium" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxGPT2LMHeadModel(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxGPT2ModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2gpt2_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("jsnfly/wav2vec2-large-xlsr-53-german-gpt2") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained( "jsnfly/wav2vec2-large-xlsr-53-german-gpt2", from_pt=True ) pt_model.to(torch_device) pt_model.eval() # prepare inputs batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") 
self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2) @require_flax class FlaxWav2Vec2BartModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "bart-large" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxBartForCausalLM(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxBartStandaloneDecoderModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2bart_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("patrickvonplaten/wav2vec2-2-bart-large") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained( "patrickvonplaten/wav2vec2-2-bart-large", from_pt=True ) pt_model.to(torch_device) pt_model.eval() # prepare inputs batch_size = 13 input_values = floats_tensor([batch_size, 512], scale=1.0) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() 
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2) @require_flax class FlaxWav2Vec2BertModelTest(FlaxEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( "facebook/wav2vec2-large-lv60", "google-bert/bert-large-uncased" ) batch_size = 13 input_values = floats_tensor([batch_size, 512], model.config.encoder.vocab_size) attention_mask = random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return model, inputs def get_encoder_decoder_model(self, config, decoder_config): encoder_model = FlaxWav2Vec2Model(config) decoder_model = FlaxBertForCausalLM(decoder_config) return encoder_model, decoder_model def prepare_config_and_inputs(self): model_tester_encoder = FlaxWav2Vec2ModelTester(self, batch_size=13) model_tester_decoder = FlaxBertModelTester(self, batch_size=13) encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs() decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs_for_decoder() (config, inputs, attention_mask) = encoder_config_and_inputs ( decoder_config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, ) = decoder_config_and_inputs # make sure that cross attention layers are added decoder_config.add_cross_attention = True return { "config": config, "inputs": inputs, "attention_mask": attention_mask, "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, } @slow def test_flaxwav2vec2bert_pt_flax_equivalence(self): pt_model = SpeechEncoderDecoderModel.from_pretrained("speech-seq2seq/wav2vec2-2-bert-large") fx_model = FlaxSpeechEncoderDecoderModel.from_pretrained("speech-seq2seq/wav2vec2-2-bert-large", from_pt=True) pt_model.to(torch_device) pt_model.eval() # prepare inputs batch_size = 13 input_values = floats_tensor([batch_size, 512], fx_model.config.encoder.vocab_size) attention_mask = 
random_attention_mask([batch_size, 512]) decoder_input_ids = ids_tensor([batch_size, 4], fx_model.config.decoder.vocab_size) decoder_attention_mask = random_attention_mask([batch_size, 4]) inputs_dict = { "inputs": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } flax_inputs = inputs_dict pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) pt_logits = pt_outputs.logits pt_outputs = pt_outputs.to_tuple() fx_outputs = fx_model(**inputs_dict) fx_logits = fx_outputs.logits fx_outputs = fx_outputs.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxSpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**inputs_dict) fx_logits_loaded = fx_outputs_loaded.logits fx_outputs_loaded = fx_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits_loaded, pt_logits.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = SpeechEncoderDecoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) pt_logits_loaded = pt_outputs_loaded.logits pt_outputs_loaded = pt_outputs_loaded.to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2)
transformers/tests/models/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py/0
{ "file_path": "transformers/tests/models/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py", "repo_id": "transformers", "token_count": 17916 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import unittest from transformers import pipeline from transformers.testing_utils import ( require_bitsandbytes, require_timm, require_torch, require_vision, slow, torch_device, ) from transformers.utils.import_utils import is_timm_available, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import TimmWrapperConfig, TimmWrapperForImageClassification, TimmWrapperModel if is_timm_available(): import timm if is_vision_available(): from PIL import Image from transformers import TimmWrapperImageProcessor class TimmWrapperModelTester: def __init__( self, parent, model_name="timm/resnet18.a1_in1k", batch_size=3, image_size=32, num_channels=3, is_training=True, ): self.parent = parent self.model_name = model_name self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return TimmWrapperConfig.from_pretrained(self.model_name) def create_and_check_model(self, config, pixel_values): model = TimmWrapperModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class TimmWrapperModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmWrapperModel, TimmWrapperForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": TimmWrapperModel, "image-classification": TimmWrapperForImageClassification} if is_torch_available() else {} ) test_resize_embeddings = False test_head_masking = False test_pruning = False has_attentions = False test_model_parallel = False def setUp(self): self.config_class = TimmWrapperConfig self.model_tester = TimmWrapperModelTester(self) self.config_tester = ConfigTester( self, config_class=self.config_class, has_text_modality=False, common_properties=[], model_name="timm/resnet18.a1_in1k", ) def test_config(self): self.config_tester.run_common_tests() def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # check all hidden states with 
torch.no_grad(): outputs = model(**inputs_dict, output_hidden_states=True) self.assertTrue( len(outputs.hidden_states) == 5, f"expected 5 hidden states, but got {len(outputs.hidden_states)}" ) expected_shapes = [[16, 16], [8, 8], [4, 4], [2, 2], [1, 1]] resulted_shapes = [list(h.shape[2:]) for h in outputs.hidden_states] self.assertListEqual(expected_shapes, resulted_shapes) # check we can select hidden states by indices with torch.no_grad(): outputs = model(**inputs_dict, output_hidden_states=[-2, -1]) self.assertTrue( len(outputs.hidden_states) == 2, f"expected 2 hidden states, but got {len(outputs.hidden_states)}" ) expected_shapes = [[2, 2], [1, 1]] resulted_shapes = [list(h.shape[2:]) for h in outputs.hidden_states] self.assertListEqual(expected_shapes, resulted_shapes) @unittest.skip(reason="TimmWrapper models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TimmWrapper models doesn't have inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="TimmWrapper doesn't support output_attentions=True.") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="TimmWrapper doesn't support this.") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="TimmWrapper initialization is managed on the timm side") def test_initialization(self): pass @unittest.skip(reason="Need to use a timm model and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_do_pooling_option(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.do_pooling = False model = TimmWrapperModel._from_config(config) # check there is no pooling with torch.no_grad(): output = model(**inputs_dict) self.assertIsNone(output.pooler_output) # check there is pooler output with torch.no_grad(): output = model(**inputs_dict, do_pooling=True) self.assertIsNotNone(output.pooler_output) def test_timm_config_labels(self): # test timm config with no labels checkpoint = "timm/resnet18.a1_in1k" config = TimmWrapperConfig.from_pretrained(checkpoint) self.assertIsNone(config.label2id) self.assertIsInstance(config.id2label, dict) self.assertEqual(len(config.id2label), 1000) self.assertEqual(config.id2label[1], "goldfish, Carassius auratus") # test timm config with labels in config checkpoint = "timm/eva02_large_patch14_clip_336.merged2b_ft_inat21" config = TimmWrapperConfig.from_pretrained(checkpoint) self.assertIsInstance(config.id2label, dict) self.assertEqual(len(config.id2label), 10000) self.assertEqual(config.id2label[1], "Sabella spallanzanii") self.assertIsInstance(config.label2id, dict) self.assertEqual(len(config.label2id), 10000) self.assertEqual(config.label2id["Sabella spallanzanii"], 1) # test custom labels are provided checkpoint = "timm/resnet18.a1_in1k" config = TimmWrapperConfig.from_pretrained(checkpoint, num_labels=2) self.assertEqual(config.num_labels, 2) self.assertEqual(config.id2label, {0: "LABEL_0", 1: "LABEL_1"}) self.assertEqual(config.label2id, {"LABEL_0": 0, "LABEL_1": 1}) # test with provided id2label and label2id 
checkpoint = "timm/resnet18.a1_in1k" config = TimmWrapperConfig.from_pretrained( checkpoint, num_labels=2, id2label={0: "LABEL_0", 1: "LABEL_1"}, label2id={"LABEL_0": 0, "LABEL_1": 1} ) self.assertEqual(config.num_labels, 2) self.assertEqual(config.id2label, {0: "LABEL_0", 1: "LABEL_1"}) self.assertEqual(config.label2id, {"LABEL_0": 0, "LABEL_1": 1}) # test save load checkpoint = "timm/resnet18.a1_in1k" config = TimmWrapperConfig.from_pretrained(checkpoint) with tempfile.TemporaryDirectory() as tmpdirname: config.save_pretrained(tmpdirname) restored_config = TimmWrapperConfig.from_pretrained(tmpdirname) self.assertEqual(config.num_labels, restored_config.num_labels) self.assertEqual(config.id2label, restored_config.id2label) self.assertEqual(config.label2id, restored_config.label2id) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_timm @require_vision class TimmWrapperModelIntegrationTest(unittest.TestCase): # some popular ones model_names_to_test = [ "vit_small_patch16_384.augreg_in21k_ft_in1k", "resnet50.a1_in1k", "tf_mobilenetv3_large_minimal_100.in1k", "swin_tiny_patch4_window7_224.ms_in1k", "ese_vovnet19b_dw.ra_in1k", "hrnet_w18.ms_aug_in1k", ] @slow def test_inference_image_classification_head(self): checkpoint = "timm/resnet18.a1_in1k" model = TimmWrapperForImageClassification.from_pretrained(checkpoint, device_map=torch_device).eval() image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the shape and logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_label = 281 # tabby cat self.assertEqual(torch.argmax(outputs.logits).item(), expected_label) expected_slice = torch.tensor([-11.2618, -9.6192, -10.3205]).to(torch_device) resulted_slice = outputs.logits[0, :3] is_close = torch.allclose(resulted_slice, expected_slice, atol=1e-3) self.assertTrue(is_close, f"Expected {expected_slice}, but got {resulted_slice}") @slow def test_inference_with_pipeline(self): image = prepare_img() classifier = pipeline(model="timm/resnet18.a1_in1k", device=torch_device) result = classifier(image) # verify result expected_label = "tabby, tabby cat" expected_score = 0.4329 self.assertEqual(result[0]["label"], expected_label) self.assertAlmostEqual(result[0]["score"], expected_score, places=3) @slow @require_bitsandbytes def test_inference_image_classification_quantized(self): from transformers import BitsAndBytesConfig checkpoint = "timm/vit_small_patch16_384.augreg_in21k_ft_in1k" quantization_config = BitsAndBytesConfig(load_in_8bit=True) model = TimmWrapperForImageClassification.from_pretrained( checkpoint, quantization_config=quantization_config, device_map=torch_device ).eval() image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the shape and logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_label = 281 # tabby cat self.assertEqual(torch.argmax(outputs.logits).item(), expected_label) expected_slice = torch.tensor([-2.4043, 1.4492, -0.5127]).to(outputs.logits.dtype) resulted_slice 
= outputs.logits[0, :3].cpu() is_close = torch.allclose(resulted_slice, expected_slice, atol=0.1) self.assertTrue(is_close, f"Expected {expected_slice}, but got {resulted_slice}") @slow def test_transformers_model_for_classification_is_equivalent_to_timm(self): # check that wrapper logits are the same as timm model logits image = prepare_img() for model_name in self.model_names_to_test: checkpoint = f"timm/{model_name}" with self.subTest(msg=model_name): # prepare inputs image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) pixel_values = image_processor(images=image).pixel_values.to(torch_device) # load models model = TimmWrapperForImageClassification.from_pretrained(checkpoint, device_map=torch_device).eval() timm_model = timm.create_model(model_name, pretrained=True).to(torch_device).eval() with torch.inference_mode(): outputs = model(pixel_values) timm_outputs = timm_model(pixel_values) # check shape is the same self.assertEqual(outputs.logits.shape, timm_outputs.shape) # check logits are the same diff = (outputs.logits - timm_outputs).max().item() self.assertLess(diff, 1e-4) @slow def test_transformers_model_is_equivalent_to_timm(self): # check that wrapper logits are the same as timm model logits image = prepare_img() models_to_test = ["vit_small_patch16_224.dino"] + self.model_names_to_test for model_name in models_to_test: checkpoint = f"timm/{model_name}" with self.subTest(msg=model_name): # prepare inputs image_processor = TimmWrapperImageProcessor.from_pretrained(checkpoint) pixel_values = image_processor(images=image).pixel_values.to(torch_device) # load models model = TimmWrapperModel.from_pretrained(checkpoint, device_map=torch_device).eval() timm_model = timm.create_model(model_name, pretrained=True, num_classes=0).to(torch_device).eval() with torch.inference_mode(): outputs = model(pixel_values) timm_outputs = timm_model(pixel_values) # check shape is the same self.assertEqual(outputs.pooler_output.shape, timm_outputs.shape) # check logits are the same diff = (outputs.pooler_output - timm_outputs).max().item() self.assertLess(diff, 1e-4) @slow def test_save_load_to_timm(self): # test that timm model can be loaded to transformers, saved and then loaded back into timm model = TimmWrapperForImageClassification.from_pretrained( "timm/resnet18.a1_in1k", num_labels=10, ignore_mismatched_sizes=True ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # there is no direct way to load timm model from folder, use the same config + path to weights timm_model = timm.create_model( "resnet18", num_classes=10, checkpoint_path=f"{tmpdirname}/model.safetensors" ) # check that all weights are the same after reload different_weights = [] for (name1, param1), (name2, param2) in zip( model.timm_model.named_parameters(), timm_model.named_parameters() ): if param1.shape != param2.shape or not torch.equal(param1, param2): different_weights.append((name1, name2)) if different_weights: self.fail(f"Found different weights after reloading: {different_weights}")
transformers/tests/models/timm_wrapper/test_modeling_timm_wrapper.py/0
{ "file_path": "transformers/tests/models/timm_wrapper/test_modeling_timm_wrapper.py", "repo_id": "transformers", "token_count": 7069 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch UniSpeechSat model.""" import math import unittest import numpy as np import pytest from datasets import load_dataset from transformers import UniSpeechSatConfig, is_torch_available from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( UniSpeechSatForAudioFrameClassification, UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, UniSpeechSatModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) class UniSpeechSatModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, tdnn_dim=(32, 32), tdnn_kernel=(3, 3), tdnn_dilation=(1, 1), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) 
self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return UniSpeechSatConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = UniSpeechSatModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = UniSpeechSatModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = UniSpeechSatForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) 
self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = UniSpeechSatForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechSatForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechSatForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, *args): config.ctc_zero_infinity = True model = UniSpeechSatForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() # use a longer sequence length to account for TDNN temporal downsampling input_values = floats_tensor([self.batch_size, self.seq_length * 2], scale=1.0) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = UniSpeechSatForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = 
model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class UniSpeechSatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatModel, UniSpeechSatForSequenceClassification, UniSpeechSatForAudioFrameClassification, UniSpeechSatForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": UniSpeechSatForSequenceClassification, "automatic-speech-recognition": UniSpeechSatForCTC, "feature-extraction": UniSpeechSatModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = UniSpeechSatModelTester(self) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Model has no input_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Model has input_values instead of input_ids") def test_forward_signature(self): pass @unittest.skip(reason="Model has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Model has no input_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), 
self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( 
input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus") self.assertIsNotNone(model) @require_torch class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( (UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatModel, UniSpeechSatForSequenceClassification) if is_torch_available() else () ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = UniSpeechSatModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Model has no input_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Model has input_values instead of input_ids") def test_forward_signature(self): pass @unittest.skip(reason="Model has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Model has no input_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = 
outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "label_embeddings_concat", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechSatForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = UniSpeechSatForCTC.from_pretrained( 
"hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large") self.assertIsNotNone(model) @require_torch @require_soundfile @slow class UniSpeechSatModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test", trust_remote_code=True) return ds[:num_samples] def test_inference_encoder_base(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), ) # fmt: off expected_hidden_states_slice = torch.tensor( [[[-0.0743, 0.1384], [-0.0845, 0.1704]], [[-0.0954, 0.1936], [-0.1123, 0.2095]]], device=torch_device, ) # fmt: on torch.testing.assert_close( outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, rtol=1e-3, atol=1e-3 ) def test_inference_encoder_large(self): model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), ) # fmt: off expected_hidden_states_slice = torch.tensor( [[[-0.1172, -0.0797], [-0.0012, 0.0213]], [[-0.1225, -0.1277], [-0.0668, -0.0585]]], device=torch_device, ) # fmt: on torch.testing.assert_close( outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, rtol=1e-3, atol=1e-3 ) def test_inference_diarization(self): model = UniSpeechSatForAudioFrameClassification.from_pretrained("microsoft/unispeech-sat-base-plus-sd").to( torch_device ) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", 
padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (outputs.logits > 0).long() # s3prl logits for the same batch expected_logits = torch.tensor( [ [[-5.6119, -5.5845], [-3.7772, -5.4824], [-3.6914, -5.1619], [-4.7560, -5.0496]], [[-6.3785, -4.8365], [-5.5863, -5.4149], [-5.5639, -4.8469], [-6.1511, -4.0052]], [[-6.0355, -3.7414], [-5.5968, -4.8061], [-5.4620, -4.7310], [-5.5864, -4.6078]], [[-5.9493, -4.8963], [-4.4050, -5.4476], [-4.1755, -5.1395], [-4.0272, -4.3705]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 270) self.assertEqual(labels[0, :, 1].sum(), 647) torch.testing.assert_close(outputs.logits[:, :4], expected_logits, rtol=1e-2, atol=1e-2) def test_inference_speaker_verification(self): model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1) cosine_sim = torch.nn.CosineSimilarity(dim=-1) # id10002 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).item(), 0.9671, 3) # id10006 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).item(), 0.4941, 3) # id10002 vs id10004 self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.5616, 3) self.assertAlmostEqual(outputs.loss.item(), 18.5925, 2)
transformers/tests/models/unispeech_sat/test_modeling_unispeech_sat.py/0
{ "file_path": "transformers/tests/models/unispeech_sat/test_modeling_unispeech_sat.py", "repo_id": "transformers", "token_count": 17154 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VipLlava model.""" import unittest import requests from parameterized import parameterized from transformers import ( AutoProcessor, VipLlavaConfig, VipLlavaForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( cleanup, require_bitsandbytes, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): from PIL import Image # Copied from transformers.tests.models.llava.test_modeling_llava.LlavaVisionText2TextModelTester with Llava->VipLlava class VipLlavaVisionText2TextModelTester: # Ignore copy def __init__( self, parent, ignore_index=-100, image_token_index=0, projector_hidden_act="gelu", seq_length=7, vision_feature_layers=[0, 0, 1, 1, 0], text_config={ "model_type": "llama", "seq_length": 7, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 512, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 1, }, is_training=True, vision_config={ "batch_size": 12, "image_size": 8, "patch_size": 2, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_layers = vision_feature_layers self.text_config = text_config self.vision_config = vision_config self.pad_token_id = text_config["pad_token_id"] self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = 3 self.image_size = 336 self.num_image_tokens = (self.vision_config["image_size"] // self.vision_config["patch_size"]) ** 2 self.seq_length = seq_length + self.num_image_tokens self.encoder_seq_length = self.seq_length def get_config(self): return VipLlavaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, projector_hidden_act=self.projector_hidden_act, vision_feature_layers=self.vision_feature_layers, 
image_seq_length=self.num_image_tokens, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(1).to(torch_device) input_ids[input_ids == config.image_token_index] = self.pad_token_id input_ids[:, : self.num_image_tokens] = config.image_token_index inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch # Copied from transformers.tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest with Llava->VipLlava class VipLlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `VipLlavaForConditionalGeneration`. """ all_model_classes = (VipLlavaForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (VipLlavaForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = {"image-text-to-text": VipLlavaForConditionalGeneration} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = True test_head_masking = False _is_composite = True def setUp(self): self.model_tester = VipLlavaVisionText2TextModelTester(self) common_properties = ["image_token_index", "vision_feature_layers", "image_seq_length"] self.config_tester = ConfigTester( self, config_class=VipLlavaConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs # while some other models require pixel_values to be present def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] inputs_embeds = model.get_input_embeddings()(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] torch.testing.assert_close(out_embeds, out_ids) # Copied from tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_mismatching_num_image_tokens def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image 
tokens in the text. Also we need to test multi-image cases when one prompt has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) _ = model(**input_dict) # successful forward with no modifications # remove one image but leave the image token in text input_dict["pixel_values"] = input_dict["pixel_values"][-1:, ...] with self.assertRaises(ValueError): _ = model(**input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = input_dict["input_ids"][:1] pixel_values = input_dict["pixel_values"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model(input_ids=input_ids, pixel_values=pixel_values) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) _ = model(input_ids=input_ids, pixel_values=pixel_values) @parameterized.expand( [ (-1,), ([-1],), ([-1, -2],), ], ) def test_vision_feature_layers(self, vision_feature_layers): """ Test that we can use either one vision feature layer, or a list of vision feature layers. """ # NOTE: vipllava uses vision_feature_layers instead of vision_feature_layer as the # config key. The reason is that other llava classes supported one vision feature layer # and added support for a list of layers with granite vision support, while vipllava # originally supported multiple feature layers, and added support for a single layer # for compatibility reasons. config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.vision_feature_layers = vision_feature_layers num_feature_layers = 1 if isinstance(vision_feature_layers, int) else len(vision_feature_layers) hidden_size = config.vision_config.hidden_size expected_features = hidden_size * num_feature_layers for model_class in self.all_model_classes: model = model_class(config).to(torch_device) # We should have the right number of input features, # and should be able to run a forward pass without exploding assert model.multi_modal_projector.linear_1.in_features == expected_features model(**input_dict) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Compile is not yet supported in LLava") def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Compile is not yet supported in LLava models") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip("FlashAttention only supports fp16 and bf16 data types") def test_flash_attn_2_fp32_ln(self): pass @unittest.skip( "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs.
Can be tested as part of LLM test" ) def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @require_torch class VipLlavaForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf") def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_bitsandbytes def test_small_model_integration_test(self): model_id = "llava-hf/vip-llava-7b-hf" model = VipLlavaForConditionalGeneration.from_pretrained(model_id, load_in_4bit=True) processor = AutoProcessor.from_pretrained(model_id) url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png" image = Image.open(requests.get(url, stream=True).raw) prompt = "USER: <image>\nCan you please describe this image?\nASSISTANT:" inputs = processor(prompt, image, return_tensors="pt").to(torch_device, torch.float16) outputs = model.generate(**inputs, max_new_tokens=10) EXPECTED_OUTPUT = "USER: \nCan you please describe this image?\nASSISTANT: The image features a brown and white cat sitting on" self.assertEqual(processor.decode(outputs[0], skip_special_tokens=True), EXPECTED_OUTPUT)
transformers/tests/models/vipllava/test_modeling_vipllava.py/0
{ "file_path": "transformers/tests/models/vipllava/test_modeling_vipllava.py", "repo_id": "transformers", "token_count": 5978 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow ViT model.""" from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class TFViTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.attn_implementation = attn_implementation # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, pixel_values, labels): model = 
TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image of a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image of a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViT
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
""" all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ViT does not use inputs_embeds") def test_graph_mode_with_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFViTModel.from_pretrained("google/vit-base-patch16-224") self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class TFViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.2744, 0.8215, -0.0836]) tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
transformers/tests/models/vit/test_modeling_tf_vit.py/0
{ "file_path": "transformers/tests/models/vit/test_modeling_tf_vit.py", "repo_id": "transformers", "token_count": 3984 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VitPose backbone model.""" import inspect import unittest from transformers import VitPoseBackboneConfig from transformers.testing_utils import require_torch from transformers.utils import is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): from transformers import VitPoseBackbone if is_vision_available(): pass class VitPoseBackboneModelTester: def __init__( self, parent, batch_size=13, image_size=[16 * 8, 12 * 8], patch_size=[8, 8], num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope # in VitPoseBackbone, the seq length equals the number of patches num_patches = (image_size[0] // patch_size[0]) * (image_size[1] // patch_size[1]) self.seq_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VitPoseBackboneConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, num_labels=self.num_labels, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitPoseBackboneModelTest(ModelTesterMixin, unittest.TestCase): 
""" Here we also overwrite some of the tests of test_modeling_common.py, as VitPoseBackbone does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VitPoseBackbone,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitPoseBackboneModelTester(self) self.config_tester = ConfigTester( self, config_class=VitPoseBackboneConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VitPoseBackbone does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="VitPoseBackbone does not support input and output embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="VitPoseBackbone does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="VitPoseBackbone does not support feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="VitPoseBackbone does not output a loss") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="VitPoseBackbone does not support training yet") def test_training(self): pass @unittest.skip(reason="VitPoseBackbone does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="VitPoseBackbone does not support training yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="VitPoseBackbone does not support training yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @require_torch class VitPoseBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (VitPoseBackbone,) if is_torch_available() else () config_class = VitPoseBackboneConfig has_attentions = False def setUp(self): self.model_tester = VitPoseBackboneModelTester(self)
transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py/0
{ "file_path": "transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py", "repo_id": "transformers", "token_count": 2864 }