Dataset columns: code (string, 86–54.5k chars) · code_codestyle (int64, 0–371) · style_context (string, 87–49.2k chars) · style_context_codestyle (int64, 0–349) · label (int64, 0–1).
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want",
            "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )

def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Returns the numerator of the largest proper fraction strictly less than
    numerator/denominator whose denominator is at most `limit` (Project Euler 71)."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))

def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns the smallest difference D = P_j - P_k of two pentagonal numbers
    whose sum and difference are both pentagonal (Project Euler 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")

import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)

from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344,
                    -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941, -9.32562038, 2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037,
                    -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723, 4.82767506, 4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553, 1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)


@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_text = "Hello, my dog is cute and"
        tokens = tokenizer(input_text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")

import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by uppercasing it, removing non-letters, and
    inserting 'X' between repeated letters and as final padding."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow a 25-letter 5x5 table
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
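A quick round-trip sanity check for the cipher above; the key and message are illustrative choices, not part of the original file:

if __name__ == "__main__":
    # hypothetical demo: encrypt and then decrypt with an arbitrary key
    key = "playfair example"
    ciphertext = encode("Hide the gold in the tree stump", key)
    print(ciphertext)
    print(decode(ciphertext, key))  # prints the prepared plaintext (uppercase, X-padded)
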
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that there is no other
        # column in the current board (possible_board) with the same value, because if
        # there is it means there is a collision in the vertical direction. Then we
        # apply the two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify whether the results of these two formulas already exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)

import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )

import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)

def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n using a sieve of
    Eratosthenes (Project Euler 10)."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")

# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
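A minimal usage sketch for the Version class above; the version literals are illustrative:

if __name__ == "__main__":
    v = Version("1.0.0")
    print(repr(v))       # 1.0.0
    print(v.tuple)       # (1, 0, 0)
    assert v == "1.0.0"  # comparison operands may be plain strings
    assert v < "1.2.0"
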
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} __UpperCamelCase = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", }, "tokenizer_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json", }, } # TODO(PVP) - this should be removed in Transformers v5 __UpperCamelCase = { "t5-small": 512, "t5-base": 512, "t5-large": 512, "t5-3b": 512, "t5-11b": 512, } class _A ( __lowercase ): lowercase__: List[str] = VOCAB_FILES_NAMES lowercase__: Optional[int] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: Any = ['''input_ids''', '''attention_mask'''] lowercase__: Any = TaTokenizer lowercase__: List[int] = [] def __init__( self : List[str] , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : List[str]="</s>" , __magic_name__ : Any="<unk>" , __magic_name__ : Any="<pad>" , __magic_name__ : int=1_00 , __magic_name__ : List[Any]=None , **__magic_name__ : Optional[Any] , ) -> Tuple: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __snake_case : Optional[int] = [f'''<extra_id_{i}>''' for i in range(__magic_name__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __snake_case : Tuple = len(set(filter(lambda __magic_name__ : bool("""extra_id_""" in str(__magic_name__ ) ) , __magic_name__ ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' """ provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) super().__init__( __magic_name__ , tokenizer_file=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) __snake_case : Optional[int] = vocab_file __snake_case : Any = False if not self.vocab_file else True __snake_case : Union[str, Any] = extra_ids @staticmethod def lowercase__ ( __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : str ) -> Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __snake_case : Dict = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __magic_name__ , ) return max_model_length def lowercase__ ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__magic_name__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case : Dict = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ): copyfile(self.vocab_file , __magic_name__ ) logger.info(f'''Copy vocab file to {out_vocab_file}''' ) return (out_vocab_file,) def lowercase__ ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Dict = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __snake_case : int = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Optional[Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" return list( set(filter(lambda __magic_name__ : bool(re.search(r"""<extra_id_\d+>""" , __magic_name__ ) ) is not None , self.additional_special_tokens ) ) ) def lowercase__ ( self : Optional[Any] 
) -> Tuple: """simple docstring""" return [self.convert_tokens_to_ids(__magic_name__ ) for token in self.get_sentinel_tokens()]
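# A hedged, standalone sketch of the special-token logic in the tokenizer
# record above: an EOS id is appended to each segment when building model
# inputs. The name `build_inputs_with_special_tokens`, the EOS id of 1 (T5's
# default </s>), and the empty prefix-token list are assumptions for
# illustration; the dataset masks identifiers.
EOS_TOKEN_ID = 1
PREFIX_TOKENS: list = []  # T5-style tokenizers prepend nothing by default

def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    token_ids_0 = token_ids_0 + [EOS_TOKEN_ID]
    if token_ids_1 is None:
        return PREFIX_TOKENS + token_ids_0
    return PREFIX_TOKENS + token_ids_0 + token_ids_1 + [EOS_TOKEN_ID]

assert build_inputs_with_special_tokens([10, 11]) == [10, 11, 1]
assert build_inputs_with_special_tokens([10], [20]) == [10, 1, 20, 1]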
13
'''simple docstring'''


def _a ( _lowerCamelCase ) -> str:
    """simple docstring"""
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    __snake_case : Tuple = """"""
    while len(_lowerCamelCase ) % 3 != 0:
        __snake_case : Any = """0""" + bin_string
    __snake_case : Tuple = [
        bin_string[index : index + 3] for index in range(len(_lowerCamelCase ) ) if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        __snake_case : Tuple = 0
        for index, val in enumerate(_lowerCamelCase ):
            oct_val += int(2 ** (2 - index) * int(_lowerCamelCase ) )
        oct_string += str(_lowerCamelCase )
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
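# A hedged, de-obfuscated sketch of the binary-to-octal routine above: pad the
# bit string to a multiple of 3, then map each 3-bit group to one octal digit.
# The name `bin_to_octal` is an assumption, since the dataset masks
# identifiers; only the logic is taken from the snippet.
def bin_to_octal(bin_string: str) -> str:
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    # Left-pad so the length is a multiple of 3 (each octal digit covers 3 bits).
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    groups = [bin_string[i : i + 3] for i in range(0, len(bin_string), 3)]
    return "".join(str(int(group, 2)) for group in groups)

assert bin_to_octal("111101") == "75"  # 0b111101 == 61 == 0o75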
13
1
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class _A : def __init__( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=13 , __magic_name__ : Optional[int]=32 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : Any=16 , __magic_name__ : int=[1, 2, 1] , __magic_name__ : Dict=[2, 2, 4] , __magic_name__ : Optional[int]=2 , __magic_name__ : str=2.0 , __magic_name__ : str=True , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : List[str]=0.1 , __magic_name__ : int="gelu" , __magic_name__ : Optional[int]=False , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=1E-5 , __magic_name__ : int=True , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]=True , __magic_name__ : int=10 , __magic_name__ : int=8 , __magic_name__ : str=["stage1", "stage2", "stage3"] , __magic_name__ : List[Any]=[1, 2, 3] , ) -> Dict: """simple docstring""" __snake_case : List[str] = parent __snake_case : Optional[int] = batch_size __snake_case : List[str] = image_size __snake_case : Any = patch_size __snake_case : str = num_channels __snake_case : List[str] = embed_dim __snake_case : Dict = depths __snake_case : str = num_heads __snake_case : str = window_size __snake_case : List[Any] = mlp_ratio __snake_case : Optional[int] = qkv_bias __snake_case : Tuple = hidden_dropout_prob __snake_case : str = attention_probs_dropout_prob __snake_case : List[str] = drop_path_rate __snake_case : Optional[Any] = hidden_act __snake_case : List[str] = use_absolute_embeddings __snake_case : List[str] = patch_norm __snake_case : int = layer_norm_eps __snake_case : int = initializer_range __snake_case : int = is_training __snake_case : Optional[Any] = scope __snake_case : int = use_labels __snake_case : Tuple = type_sequence_label_size __snake_case : Optional[int] = encoder_stride __snake_case : Union[str, Any] = out_features __snake_case : int = out_indices def lowercase__ ( self : Any ) -> List[str]: """simple docstring""" __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> List[Any]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , 
hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Tuple ) -> Optional[Any]: """simple docstring""" __snake_case : Tuple = MaskFormerSwinModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : int = model(__magic_name__ ) __snake_case : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __snake_case : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> List[Any]: """simple docstring""" __snake_case : int = MaskFormerSwinBackbone(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Any = model(__magic_name__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(__magic_name__ ): __snake_case : str = ["""stem"""] __snake_case : int = MaskFormerSwinBackbone(config=__magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : List[str] = config_and_inputs __snake_case : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: str = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowercase__: Union[str, Any] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {} lowercase__: Any = False lowercase__: Any = False lowercase__: Dict = False lowercase__: int = False lowercase__: Any = False def lowercase__ ( self : Union[str, Any] ) -> str: """simple docstring""" __snake_case : List[Any] = MaskFormerSwinModelTester(self ) __snake_case : Dict = ConfigTester(self , config_class=__magic_name__ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ `nn.DataParallel`""" ) ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return def lowercase__ ( self : Union[str, Any] ) -> Dict: 
"""simple docstring""" __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__magic_name__ ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Dict ) -> List[Any]: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[Any] = model_class(__magic_name__ ) __snake_case : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : List[Any] = [*signature.parameters.keys()] __snake_case : Optional[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowercase__ ( self : Optional[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : List[str] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Any ) -> Optional[Any]: """simple docstring""" __snake_case : int = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Any = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Optional[Any] = outputs.hidden_states __snake_case : Tuple = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__magic_name__ ) , __magic_name__ ) # Swin has a different seq_length __snake_case : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __snake_case : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __snake_case : Any = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : str ) -> List[str]: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = 3 __snake_case : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __snake_case : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __snake_case : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __snake_case : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __snake_case : str = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Tuple = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowercase__ ( self : List[Any] ) -> int: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowercase__ ( self : int ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__magic_name__ : Optional[Any] ): __snake_case : Any = 0 return t def check_equivalence(__magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int={} ): with torch.no_grad(): __snake_case : Union[str, Any] = model(**__magic_name__ , return_dict=__magic_name__ , **__magic_name__ ) __snake_case : Tuple = model(**__magic_name__ , return_dict=__magic_name__ , **__magic_name__ ).to_tuple() def recursive_check(__magic_name__ : Dict , __magic_name__ : Tuple ): if isinstance(__magic_name__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__magic_name__ , __magic_name__ ): recursive_check(__magic_name__ , __magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(__magic_name__ , __magic_name__ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__magic_name__ ) , set_nan_tensor_to_zero(__magic_name__ ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(__magic_name__ ).any()} and `inf`: {torch.isinf(__magic_name__ )}. 
Dict has''' f''' `nan`: {torch.isnan(__magic_name__ ).any()} and `inf`: {torch.isinf(__magic_name__ )}.''' ) , ) recursive_check(__magic_name__ , __magic_name__ ) for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Union[str, Any] = self._prepare_for_class(__magic_name__ , __magic_name__ ) __snake_case : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ ) check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ ) __snake_case : int = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ ) __snake_case : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ ) check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ , {"""output_hidden_states""": True} ) __snake_case : Union[str, Any] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Optional[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ , {"""output_hidden_states""": True} ) @require_torch class _A ( unittest.TestCase , __lowercase ): lowercase__: Optional[int] = (MaskFormerSwinBackbone,) if is_torch_available() else () lowercase__: Optional[Any] = MaskFormerSwinConfig def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" __snake_case : List[Any] = MaskFormerSwinModelTester(self ) def lowercase__ ( self : Dict ) -> Any: """simple docstring""" __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : int = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: __snake_case : Union[str, Any] = backbone_class(__magic_name__ ) backbone.to(__magic_name__ ) backbone.eval() __snake_case : int = backbone(**__magic_name__ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __magic_name__ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __snake_case : List[Any] = backbone(**__magic_name__ , output_hidden_states=__magic_name__ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __snake_case , __snake_case , __snake_case : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __snake_case : Optional[int] = backbone(**__magic_name__ , output_attentions=__magic_name__ ) self.assertIsNotNone(outputs.attentions )
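# A standalone sketch of the tuple-vs-dict equivalence check exercised in the
# test record above: recursively walk nested outputs and compare tensors with
# NaNs zeroed, since NaN != NaN would make torch.allclose fail spuriously.
# Pure PyTorch; the helper names are assumptions, and this is a trimmed
# version of the masked test code (the dict-of-tensors branch is omitted).
import torch

def set_nan_tensor_to_zero(t: torch.Tensor) -> torch.Tensor:
    t[t != t] = 0  # NaN is the only value not equal to itself
    return t

def recursive_check(tuple_object, dict_object, atol: float = 1e-5) -> None:
    if isinstance(tuple_object, (list, tuple)):
        for a, b in zip(tuple_object, dict_object):
            recursive_check(a, b, atol)
    elif tuple_object is None:
        return
    else:
        assert torch.allclose(
            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=atol
        )

recursive_check((torch.ones(2), None), (torch.ones(2), None))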
13
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

__UpperCamelCase = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

__UpperCamelCase = TaTokenizerFast

__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    __UpperCamelCase = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
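# A minimal sketch of the lazy-import pattern this mt5 __init__ relies on:
# heavy submodules are imported only when one of their exported names is first
# accessed. This is a simplified stand-in for transformers' internal
# _LazyModule, demonstrated with stdlib modules; it is not the real API.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Reverse {module: [exported names]} into {name: module} for O(1) lookup.
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, name):  # only called when `name` is not already set
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9) == 3.0 and lazy.dumps([1]) == "[1]"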
13
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: List[Any] = StableDiffusionPanoramaPipeline lowercase__: Dict = TEXT_TO_IMAGE_PARAMS lowercase__: Dict = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase__: Dict = TEXT_TO_IMAGE_IMAGE_PARAMS def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) __snake_case : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __snake_case : Optional[int] = DDIMScheduler() torch.manual_seed(0 ) __snake_case : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __snake_case : Optional[Any] = CLIPTextModel(__magic_name__ ) __snake_case : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __snake_case : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowercase__ ( self : Optional[int] , __magic_name__ : Any , __magic_name__ : List[str]=0 ) -> int: """simple docstring""" __snake_case : str = torch.manual_seed(__magic_name__ ) __snake_case : List[Any] = { """prompt""": """a photo of the dolomites""", """generator""": generator, # Setting height and width to None to prevent OOMs on CPU. 
"""height""": None, """width""": None, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def lowercase__ ( self : int ) -> List[Any]: """simple docstring""" __snake_case : str = """cpu""" # ensure determinism for the device-dependent torch.Generator __snake_case : List[str] = self.get_dummy_components() __snake_case : List[str] = StableDiffusionPanoramaPipeline(**__magic_name__ ) __snake_case : Any = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[Any] = self.get_dummy_inputs(__magic_name__ ) __snake_case : Union[str, Any] = sd_pipe(**__magic_name__ ).images __snake_case : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : Union[str, Any] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self : List[str] ) -> int: """simple docstring""" super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def lowercase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __snake_case : str = """cpu""" # ensure determinism for the device-dependent torch.Generator __snake_case : Optional[Any] = self.get_dummy_components() __snake_case : Optional[Any] = StableDiffusionPanoramaPipeline(**__magic_name__ ) __snake_case : str = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[Any] = self.get_dummy_inputs(__magic_name__ ) __snake_case : Optional[int] = """french fries""" __snake_case : Optional[int] = sd_pipe(**__magic_name__ , negative_prompt=__magic_name__ ) __snake_case : Optional[Any] = output.images __snake_case : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : str = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" __snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator __snake_case : int = self.get_dummy_components() __snake_case : Dict = StableDiffusionPanoramaPipeline(**__magic_name__ ) __snake_case : Optional[int] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : int = self.get_dummy_inputs(__magic_name__ ) __snake_case : Optional[Any] = sd_pipe(**__magic_name__ , view_batch_size=2 ) __snake_case : Optional[int] = output.images __snake_case : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : Any = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" __snake_case : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator __snake_case : str = self.get_dummy_components() __snake_case : List[Any] = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) __snake_case : List[Any] = StableDiffusionPanoramaPipeline(**__magic_name__ ) __snake_case : List[str] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ 
) __snake_case : Tuple = self.get_dummy_inputs(__magic_name__ ) __snake_case : List[Any] = sd_pipe(**__magic_name__ ).images __snake_case : str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase__ ( self : int ) -> Any: """simple docstring""" __snake_case : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator __snake_case : str = self.get_dummy_components() __snake_case : str = PNDMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=__magic_name__ ) __snake_case : Any = StableDiffusionPanoramaPipeline(**__magic_name__ ) __snake_case : List[Any] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[Any] = self.get_dummy_inputs(__magic_name__ ) __snake_case : Optional[int] = sd_pipe(**__magic_name__ ).images __snake_case : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : Optional[int] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : str ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Dict , __magic_name__ : Optional[int]=0 ) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = torch.manual_seed(__magic_name__ ) __snake_case : Dict = { """prompt""": """a photo of the dolomites""", """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = """stabilityai/stable-diffusion-2-base""" __snake_case : List[str] = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" ) __snake_case : Dict = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing() __snake_case : Any = self.get_inputs() __snake_case : int = pipe(**__magic_name__ ).images __snake_case : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 20_48, 3) __snake_case : Union[str, Any] = np.array( [ 0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained( """stabilityai/stable-diffusion-2-base""" , safety_checker=__magic_name__ ) __snake_case : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing() __snake_case : Tuple = self.get_inputs() __snake_case : Tuple = pipe(**__magic_name__ ).images __snake_case : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 20_48, 3) __snake_case : Optional[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowercase__ 
( self : Union[str, Any] ) -> str: """simple docstring""" __snake_case : List[str] = 0 def callback_fn(__magic_name__ : int , __magic_name__ : int , __magic_name__ : torch.FloatTensor ) -> None: __snake_case : Union[str, Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __snake_case : List[str] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 2_56) __snake_case : Optional[int] = latents[0, -3:, -3:, -1] __snake_case : List[str] = np.array( [ 0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: __snake_case : List[str] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 2_56) __snake_case : int = latents[0, -3:, -3:, -1] __snake_case : List[str] = np.array( [ 0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 __snake_case : Optional[Any] = False __snake_case : List[Any] = """stabilityai/stable-diffusion-2-base""" __snake_case : Any = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" ) __snake_case : Any = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ ) __snake_case : List[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing() __snake_case : List[Any] = self.get_inputs() pipe(**__magic_name__ , callback=__magic_name__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __snake_case : Tuple = """stabilityai/stable-diffusion-2-base""" __snake_case : int = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" ) __snake_case : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ ) __snake_case : int = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __snake_case : Union[str, Any] = self.get_inputs() __snake_case : Union[str, Any] = pipe(**__magic_name__ ) __snake_case : Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
13
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    @slow
    def lowercase__ ( self : List[str] ) -> int:
        """simple docstring"""
        __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        __snake_case : Tuple = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !"
        __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""]
        __snake_case : Any = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape , __magic_name__ )
        # compare the actual values for a slice.
        __snake_case : str = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
13
1
'''simple docstring'''

from __future__ import annotations


def _a ( _lowerCamelCase ) -> list:
    """simple docstring"""
    if len(_lowerCamelCase ) == 0:
        return []
    __snake_case , __snake_case : Tuple = min(_lowerCamelCase ), max(_lowerCamelCase )
    __snake_case : List[Any] = int(max_value - min_value ) + 1
    __snake_case : list[list] = [[] for _ in range(_lowerCamelCase )]
    for i in my_list:
        buckets[int(i - min_value )].append(_lowerCamelCase )
    return [v for bucket in buckets for v in sorted(_lowerCamelCase )]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
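# A hedged, de-obfuscated sketch of the bucket sort above: one bucket per
# integer offset between the minimum and maximum value, then an in-order
# flatten with a per-bucket sort (which handles non-integer collisions).
# The reconstructed names are assumptions; the asserts match the snippet's.
from __future__ import annotations

def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]

assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]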
13
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
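# A standalone NumPy sketch of the bbox "legalization" step in the LayoutLMv3
# tester above: random (x0, y0, x1, y1) boxes are made valid by swapping
# coordinates so that x0 <= x1 and y0 <= y1. The function name is an
# assumption; the element-wise swap mirrors the masked double loop.
import numpy as np

def legalize_bboxes(bbox: np.ndarray) -> np.ndarray:
    # bbox shape: (batch, seq_len, 4) with columns (x0, y0, x1, y1)
    out = bbox.copy()
    x0 = np.minimum(out[..., 0], out[..., 2])
    x1 = np.maximum(out[..., 0], out[..., 2])
    y0 = np.minimum(out[..., 1], out[..., 3])
    y1 = np.maximum(out[..., 1], out[..., 3])
    out[..., 0], out[..., 1], out[..., 2], out[..., 3] = x0, y0, x1, y1
    return out

assert (legalize_bboxes(np.array([[[5, 2, 1, 0]]])) == np.array([[[1, 0, 5, 2]]])).all()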
13
1
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __UpperCamelCase = logging.get_logger(__name__) class _A ( __lowercase ): lowercase__: List[str] = '''vision-encoder-decoder''' lowercase__: List[Any] = True def __init__( self : Union[str, Any] , **__magic_name__ : int ) -> List[Any]: """simple docstring""" super().__init__(**__magic_name__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) __snake_case : str = kwargs.pop("""encoder""" ) __snake_case : Any = encoder_config.pop("""model_type""" ) __snake_case : Tuple = kwargs.pop("""decoder""" ) __snake_case : List[str] = decoder_config.pop("""model_type""" ) __snake_case : Union[str, Any] = AutoConfig.for_model(__magic_name__ , **__magic_name__ ) __snake_case : Dict = AutoConfig.for_model(__magic_name__ , **__magic_name__ ) __snake_case : List[str] = True @classmethod def lowercase__ ( cls : Dict , __magic_name__ : PretrainedConfig , __magic_name__ : PretrainedConfig , **__magic_name__ : Optional[int] ) -> PretrainedConfig: """simple docstring""" logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) __snake_case : Optional[Any] = True __snake_case : int = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ ) def lowercase__ ( self : int ) -> int: """simple docstring""" __snake_case : List[str] = copy.deepcopy(self.__dict__ ) __snake_case : Tuple = self.encoder.to_dict() __snake_case : Optional[Any] = self.decoder.to_dict() __snake_case : str = self.__class__.model_type return output class _A ( __lowercase ): lowercase__: Dict = version.parse('''1.11''' ) @property def lowercase__ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowercase__ ( self : List[Any] ) -> float: """simple docstring""" return 1E-4 @property def lowercase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class _A ( __lowercase ): @property def lowercase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Any = OrderedDict() __snake_case : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __snake_case : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __snake_case : int = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def lowercase__ ( self : Tuple , __magic_name__ : "PreTrainedTokenizerBase" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]: """simple docstring""" import torch __snake_case : List[str] = OrderedDict() __snake_case : List[str] = super().generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , 
is_pair=__magic_name__ , framework=__magic_name__ ) __snake_case , __snake_case : List[Any] = dummy_input["""input_ids"""].shape __snake_case : str = (batch, encoder_sequence, self._config.encoder_hidden_size) __snake_case : Union[str, Any] = dummy_input.pop("""input_ids""" ) __snake_case : Dict = dummy_input.pop("""attention_mask""" ) __snake_case : Optional[Any] = torch.zeros(__magic_name__ ) return common_inputs class _A ( __lowercase ): @property def lowercase__ ( self : str ) -> None: """simple docstring""" pass def lowercase__ ( self : List[Any] , __magic_name__ : PretrainedConfig ) -> OnnxConfig: """simple docstring""" return VisionEncoderDecoderEncoderOnnxConfig(__magic_name__ ) def lowercase__ ( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" ) -> OnnxConfig: """simple docstring""" __snake_case : str = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__magic_name__ , __magic_name__ )
13
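The row above defines the composite vision-encoder-decoder configuration, delegating each half to AutoConfig. A short usage sketch, assuming the public transformers API (VisionEncoderDecoderConfig, ViTConfig, and BertConfig are the real exported classes):

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()   # any vision config works here
decoder_config = BertConfig()  # any text decoder config works here

# mirrors the classmethod above: the decoder config is flipped to
# is_decoder=True / add_cross_attention=True before composition
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)

assert config.decoder.is_decoder and config.decoder.add_cross_attention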
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
13
1
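The tester in the row above derives its sequence length from the patch grid: num_frames * (image_size // patch_size)**2 patches plus one CLS token. With the tester defaults (image_size=10, patch_size=2, num_frames=2) the arithmetic works out as follows:

image_size, patch_size, num_frames = 10, 2, 2

num_patches_per_frame = (image_size // patch_size) ** 2  # 5 * 5 = 25
seq_length = num_frames * num_patches_per_frame + 1      # 2 * 25 + 1 CLS = 51

assert (num_patches_per_frame, seq_length) == (25, 51)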
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _A ( __lowercase ): lowercase__: List[Any] = (KDPMaDiscreteScheduler,) lowercase__: List[str] = 10 def lowercase__ ( self : Union[str, Any] , **__magic_name__ : Dict ) -> Any: """simple docstring""" __snake_case : Optional[int] = { """num_train_timesteps""": 11_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**__magic_name__ ) return config def lowercase__ ( self : Tuple ) -> int: """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__magic_name__ ) def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__magic_name__ , beta_end=__magic_name__ ) def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Any = self.scheduler_classes[0] __snake_case : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" ) __snake_case : List[Any] = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(self.num_inference_steps ) __snake_case : Dict = self.dummy_model() __snake_case : str = self.dummy_sample_deter * scheduler.init_noise_sigma __snake_case : List[str] = sample.to(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): __snake_case : int = scheduler.scale_model_input(__magic_name__ , __magic_name__ ) __snake_case : List[str] = model(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ) __snake_case : Tuple = output.prev_sample __snake_case : Any = torch.sum(torch.abs(__magic_name__ ) ) __snake_case : Optional[Any] = torch.mean(torch.abs(__magic_name__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2 assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0002 ) < 1E-3 def lowercase__ ( self : str ) -> Dict: """simple docstring""" if torch_device == "mps": return __snake_case : List[Any] = self.scheduler_classes[0] __snake_case : Optional[Any] = self.get_scheduler_config() __snake_case : Any = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(self.num_inference_steps ) __snake_case : List[Any] = self.dummy_model() __snake_case : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma __snake_case : Dict = sample.to(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): __snake_case : Dict = scheduler.scale_model_input(__magic_name__ , __magic_name__ ) __snake_case : List[str] = model(__magic_name__ , __magic_name__ ) __snake_case : Any = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ) __snake_case : List[Any] = output.prev_sample __snake_case : Union[str, Any] = torch.sum(torch.abs(__magic_name__ ) ) __snake_case : Dict = torch.mean(torch.abs(__magic_name__ ) ) if torch_device in ["cpu", "mps"]: assert 
abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if torch_device == "mps": return __snake_case : Optional[int] = self.scheduler_classes[0] __snake_case : Optional[int] = self.get_scheduler_config() __snake_case : Union[str, Any] = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(self.num_inference_steps , device=__magic_name__ ) __snake_case : Dict = self.dummy_model() __snake_case : Dict = self.dummy_sample_deter.to(__magic_name__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: __snake_case : str = scheduler.scale_model_input(__magic_name__ , __magic_name__ ) __snake_case : List[str] = model(__magic_name__ , __magic_name__ ) __snake_case : List[Any] = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = output.prev_sample __snake_case : str = torch.sum(torch.abs(__magic_name__ ) ) __snake_case : Optional[int] = torch.mean(torch.abs(__magic_name__ ) ) if str(__magic_name__ ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1E-2 assert abs(result_mean.item() - 0.0266 ) < 1E-3
13
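Each scheduler test in the row above runs the same diffusers sampling loop: scale the model input, call the denoiser, then step. A hedged sketch of that loop, assuming the public KDPM2DiscreteScheduler API and substituting a zero tensor where a real noise-prediction model would be called:

import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample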
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["ConditionalDetrFeatureExtractor"] __UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
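The row above is the standard lazy-import __init__.py: nothing heavy is imported until an attribute is first accessed, and optional backends (torch, vision) are guarded with OptionalDependencyNotAvailable. A stripped-down sketch of the same pattern for a hypothetical package (configuration_foo / FooConfig are placeholders; _LazyModule is the private transformers helper and only resolves inside a real package module):

import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_foo": ["FooConfig"]}  # hypothetical submodule -> exported names

# placed at the bottom of the package's __init__.py; __file__ and __spec__
# belong to the module being replaced
sys.modules[__name__] = _LazyModule(
    __name__, globals()["__file__"], _import_structure, module_spec=__spec__
)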
'''simple docstring''' import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def _a ( _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) __snake_case : Union[str, Any] = AutoModelForSeqaSeqLM.from_config(_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) AutoTokenizer.from_pretrained(_lowerCamelCase ).save_pretrained(_lowerCamelCase ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
13
'''simple docstring''' def _a ( _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : str = 0 __snake_case : Optional[int] = len(_lowerCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , _lowerCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _a ( _lowerCamelCase ) -> Tuple: """simple docstring""" if len(_lowerCamelCase ) <= 1: return arr, 0 __snake_case : Any = len(_lowerCamelCase ) // 2 __snake_case : List[str] = arr[0:mid] __snake_case : int = arr[mid:] __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : Tuple = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : str = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase ) __snake_case : str = inversion_p + inversions_q + cross_inversions return c, num_inversions def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Any = [] __snake_case : List[str] = 0 while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(_lowerCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(_lowerCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _a ( ) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __snake_case : Optional[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , _lowerCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __snake_case : Any = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) # an empty list should also have zero inversions __snake_case : List[Any] = [] __snake_case : List[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) if __name__ == "__main__": main()
13
1
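The crux of the divide-and-conquer row above is the cross-inversion merge: once p is sorted, p[i] > q[j] implies every remaining element of p also inverts with q[j], so len(p) - i pairs are counted at once. A self-contained sketch with a brute-force cross-check on the same sample array:

def count_cross(p, q):
    r, count, i, j = [], 0, 0, 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            count += len(p) - i  # p[i:] all exceed q[j] because p is sorted
            r.append(q[j]); j += 1
        else:
            r.append(p[i]); i += 1
    r.extend(p[i:]); r.extend(q[j:])
    return r, count

def count_inversions(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    left, a = count_inversions(arr[:mid])
    right, b = count_inversions(arr[mid:])
    merged, c = count_cross(left, right)
    return merged, a + b + c

arr = [10, 2, 1, 5, 5, 2, 11]
brute = sum(arr[i] > arr[j] for i in range(len(arr)) for j in range(i + 1, len(arr)))
assert count_inversions(arr)[1] == brute == 8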
'''simple docstring''' import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __UpperCamelCase = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> Optional[Any]: """simple docstring""" if rng is None: __snake_case : int = random.Random() __snake_case : Dict = 1 for dim in shape: total_dims *= dim __snake_case : int = [] for _ in range(_lowerCamelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) __snake_case : Union[str, Any] = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase ) return output def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase ) # make sure that at least one token is attended to for each batch __snake_case : Optional[Any] = 1 return attn_mask @require_flax class _A : lowercase__: Optional[int] = None lowercase__: str = () def lowercase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 __snake_case : Any = 2 __snake_case : Union[str, Any] = inputs["""input_ids"""].shape[-1] // 2 __snake_case : Union[str, Any] = inputs["""input_ids"""][:max_batch_size, :sequence_length] __snake_case : Dict = jnp.ones_like(__magic_name__ ) __snake_case : Any = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens __snake_case : Optional[int] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` __snake_case : str = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : List[Any] = self._get_input_ids_and_config() __snake_case : Union[str, Any] = False __snake_case : Optional[Any] = max_length __snake_case : List[str] = 0 for model_class in self.all_generative_model_classes: __snake_case : Tuple = model_class(__magic_name__ ) __snake_case : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning __snake_case : int = getattr(__magic_name__ , __magic_name__ ) __snake_case : Tuple = pt_model_class(__magic_name__ ).eval() __snake_case : Optional[Any] = load_flax_weights_in_pytorch_model(__magic_name__ , flax_model.params ) __snake_case : str = flax_model.generate(__magic_name__ ).sequences __snake_case : List[str] = pt_model.generate(torch.tensor(__magic_name__ , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: __snake_case : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def lowercase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : int = self._get_input_ids_and_config() __snake_case : 
Optional[int] = False __snake_case : Optional[int] = max_length for model_class in self.all_generative_model_classes: __snake_case : List[str] = model_class(__magic_name__ ) __snake_case : Dict = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : List[str] = jit(model.generate ) __snake_case : Optional[Any] = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = self._get_input_ids_and_config() __snake_case : Tuple = True __snake_case : int = max_length for model_class in self.all_generative_model_classes: __snake_case : Tuple = model_class(__magic_name__ ) __snake_case : Dict = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : str = jit(model.generate ) __snake_case : int = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : Dict ) -> str: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : int = self._get_input_ids_and_config() __snake_case : int = False __snake_case : List[str] = max_length __snake_case : str = 2 for model_class in self.all_generative_model_classes: __snake_case : int = model_class(__magic_name__ ) __snake_case : Optional[Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : Union[str, Any] = jit(model.generate ) __snake_case : Optional[Any] = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : Any ) -> List[Any]: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Any = self._get_input_ids_and_config() __snake_case : List[Any] = False __snake_case : Tuple = max_length __snake_case : Union[str, Any] = 2 __snake_case : Union[str, Any] = 2 for model_class in self.all_generative_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : List[Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def lowercase__ ( self : Tuple ) -> List[Any]: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = self._get_input_ids_and_config() __snake_case : int = True __snake_case : Optional[int] = max_length __snake_case : Any = 0.8 __snake_case : List[Any] = 10 __snake_case : Union[str, Any] = 0.3 __snake_case : str = 1 __snake_case : List[Any] = 8 __snake_case : str = 9 for model_class in self.all_generative_model_classes: __snake_case : int = model_class(__magic_name__ ) __snake_case : Union[str, Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : Union[str, Any] = jit(model.generate ) __snake_case : Dict = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = self._get_input_ids_and_config() __snake_case : Optional[int] = 
max_length __snake_case : List[Any] = 1 __snake_case : Dict = 8 __snake_case : Any = 9 for model_class in self.all_generative_model_classes: __snake_case : str = model_class(__magic_name__ ) __snake_case : List[str] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : Optional[int] = jit(model.generate ) __snake_case : List[str] = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : int ) -> List[Any]: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Any = self._get_input_ids_and_config() __snake_case : Tuple = max_length __snake_case : Union[str, Any] = 2 __snake_case : Union[str, Any] = 1 __snake_case : int = 8 __snake_case : Optional[Any] = 9 for model_class in self.all_generative_model_classes: __snake_case : Optional[int] = model_class(__magic_name__ ) __snake_case : Union[str, Any] = model.generate(__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : Union[str, Any] = jit(model.generate ) __snake_case : Any = jit_generate(__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : str = self._get_input_ids_and_config() # pad attention mask on the left __snake_case : Any = attention_mask.at[(0, 0)].set(0 ) __snake_case : Optional[int] = False __snake_case : Tuple = max_length for model_class in self.all_generative_model_classes: __snake_case : List[Any] = model_class(__magic_name__ ) __snake_case : List[Any] = model.generate(__magic_name__ , attention_mask=__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : Union[str, Any] = jit(model.generate ) __snake_case : int = jit_generate(__magic_name__ , attention_mask=__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = self._get_input_ids_and_config() # pad attention mask on the left __snake_case : int = attention_mask.at[(0, 0)].set(0 ) __snake_case : Optional[Any] = True __snake_case : List[Any] = max_length for model_class in self.all_generative_model_classes: __snake_case : Tuple = model_class(__magic_name__ ) __snake_case : Optional[Any] = model.generate(__magic_name__ , attention_mask=__magic_name__ ).sequences self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : List[Any] = jit(model.generate ) __snake_case : Union[str, Any] = jit_generate(__magic_name__ , attention_mask=__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def lowercase__ ( self : Any ) -> Any: """simple docstring""" __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = self._get_input_ids_and_config() # pad attention mask on the left __snake_case : str = attention_mask.at[(0, 0)].set(0 ) __snake_case : Tuple = 2 __snake_case : List[Any] = max_length for model_class in self.all_generative_model_classes: __snake_case : Dict = model_class(__magic_name__ ) __snake_case : str = model.generate(__magic_name__ , attention_mask=__magic_name__ ).sequences 
self.assertEqual(generation_outputs.shape[-1] , __magic_name__ ) __snake_case : Dict = jit(model.generate ) __snake_case : List[Any] = jit_generate(__magic_name__ , attention_mask=__magic_name__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class _A ( unittest.TestCase ): def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" __snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) __snake_case : Optional[int] = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) __snake_case : Union[str, Any] = """Hello world""" __snake_case : Any = tokenizer(__magic_name__ , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__magic_name__ , """do_samples""" ): model.generate(__magic_name__ , do_samples=__magic_name__ ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__magic_name__ , """foo""" ): __snake_case : Optional[Any] = {"""foo""": """bar"""} model.generate(__magic_name__ , **__magic_name__ )
13
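Every generation mode in the mixin above repeats one invariant: eager model.generate and jax.jit(model.generate) must produce identical sequences. A condensed sketch of that check, reusing the tiny checkpoint the file's own test loads:

import jax.numpy as jnp
from jax import jit
from transformers import FlaxAutoModelForCausalLM

model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
input_ids = jnp.ones((1, 4), dtype="i4")

eager_out = model.generate(input_ids).sequences
jit_out = jit(model.generate)(input_ids).sequences

assert eager_out.tolist() == jit_out.tolist()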
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
13
1
'''simple docstring''' def _a ( _lowerCamelCase ) -> str: """simple docstring""" if not all(char in """01""" for char in bin_string ): raise ValueError("""Non-binary value was passed to the function""" ) if not bin_string: raise ValueError("""Empty string was passed to the function""" ) __snake_case : Tuple = """""" while len(_lowerCamelCase ) % 3 != 0: __snake_case : Any = """0""" + bin_string __snake_case : Tuple = [ bin_string[index : index + 3] for index in range(len(_lowerCamelCase ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: __snake_case : Tuple = 0 for index, val in enumerate(_lowerCamelCase ): oct_val += int(2 ** (2 - index) * int(_lowerCamelCase ) ) oct_string += str(_lowerCamelCase ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
13
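The row above converts binary to octal by left-padding the bit string to a multiple of three and mapping each 3-bit group to one octal digit. A compact sketch of the same idea, checked against Python's built-in conversions:

def bin_to_oct(bits: str) -> str:
    bits = bits.zfill((len(bits) + 2) // 3 * 3)  # left-pad to a multiple of 3
    return "".join(str(int(bits[i:i + 3], 2)) for i in range(0, len(bits), 3))

assert bin_to_oct("1111") == "17"                # 0b1111 == 0o17
assert bin_to_oct("101010") == "52"              # 0b101010 == 0o52
assert bin_to_oct(bin(255)[2:]) == oct(255)[2:]  # "377"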
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
13
1
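The tokenizer tests in the row above lean on one CANINE property: token ids are raw Unicode codepoints, so a private-use-area character such as chr(0xE005) cannot collide with real text and makes a safe fresh special token. A small sketch of that trick (CanineTokenizer() constructs offline, as in the row's setUp):

from transformers import CanineTokenizer

tokenizer = CanineTokenizer()
special = chr(0xE005)  # private-use-area codepoint
tokenizer.add_special_tokens({"cls_token": special})

ids = tokenizer.encode(special, add_special_tokens=False)
assert ids == [0xE005]  # one token, encoded directly as its codepoint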
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = [ ["attention", "attn"], ["encoder_attention", "encoder_attn"], ["q_lin", "q_proj"], ["k_lin", "k_proj"], ["v_lin", "v_proj"], ["out_lin", "out_proj"], ["norm_embeddings", "layernorm_embedding"], ["position_embeddings", "embed_positions"], ["embeddings", "embed_tokens"], ["ffn.lin", "fc"], ] def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __snake_case : int = k.replace(_lowerCamelCase , _lowerCamelCase ) if k.startswith("""encoder""" ): __snake_case : Optional[Any] = k.replace(""".attn""" , """.self_attn""" ) __snake_case : List[str] = k.replace("""norm1""" , """self_attn_layer_norm""" ) __snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): __snake_case : List[str] = k.replace("""norm1""" , """self_attn_layer_norm""" ) __snake_case : Dict = k.replace("""norm2""" , """encoder_attn_layer_norm""" ) __snake_case : Optional[Any] = k.replace("""norm3""" , """final_layer_norm""" ) return k def _a ( _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : List[Any] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: __snake_case : Union[str, Any] = sd.pop(_lowerCamelCase ) __snake_case : List[Any] = k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd __snake_case : Tuple = v __UpperCamelCase = ["START"] @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : List[str] = torch.load(_lowerCamelCase , map_location="""cpu""" ) __snake_case : Tuple = model["""model"""] __snake_case : Union[str, Any] = BlenderbotConfig.from_json_file(_lowerCamelCase ) __snake_case : Tuple = BlenderbotForConditionalGeneration(_lowerCamelCase ) __snake_case : Optional[Any] = m.model.state_dict().keys() __snake_case : Optional[Any] = [] __snake_case : List[Any] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue __snake_case : List[str] = rename_state_dict_key(_lowerCamelCase ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __snake_case : str = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(_lowerCamelCase ) m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) m.half() m.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) __UpperCamelCase = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
13
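The conversion script above maps ParlAI parameter names onto HF ones by plain substring substitution over its PATTERNS table, plus a few positional norm renames. A tiny illustration using a subset of the table (the sample key is hypothetical):

PATTERNS = [["q_lin", "q_proj"], ["k_lin", "k_proj"], ["out_lin", "out_proj"]]

key = "encoder.layers.0.attn.q_lin.weight"  # hypothetical ParlAI-style key
for parlai_name, hf_name in PATTERNS:
    key = key.replace(parlai_name, hf_name)

assert key == "encoder.layers.0.attn.q_proj.weight"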
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
13
1
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" __snake_case : int = XCLIPTextConfig() # derive patch size from model name __snake_case : Any = model_name.find("""patch""" ) __snake_case : int = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) __snake_case : str = XCLIPVisionConfig(patch_size=_lowerCamelCase , num_frames=_lowerCamelCase ) if "large" in model_name: __snake_case : Optional[Any] = 768 __snake_case : Any = 3072 __snake_case : Dict = 12 __snake_case : Tuple = 1024 __snake_case : Optional[int] = 4096 __snake_case : Dict = 16 __snake_case : List[Any] = 24 __snake_case : Dict = 768 __snake_case : Optional[int] = 3072 if model_name == "xclip-large-patch14-16-frames": __snake_case : Optional[int] = 336 __snake_case : Union[str, Any] = XCLIPConfig.from_text_vision_configs(_lowerCamelCase , _lowerCamelCase ) if "large" in model_name: __snake_case : List[Any] = 768 return config def _a ( _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" if name == "token_embedding.weight": __snake_case : Union[str, Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": __snake_case : Dict = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: __snake_case : Tuple = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: __snake_case : List[str] = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: __snake_case : str = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: __snake_case : Optional[Any] = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): __snake_case : List[str] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: __snake_case : int = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: __snake_case : List[Any] = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": __snake_case : str = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": __snake_case : Optional[int] = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): __snake_case : Optional[int] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: __snake_case : List[str] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: __snake_case : List[Any] = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: __snake_case : List[Any] = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: __snake_case : List[Any] = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: __snake_case : str = name.replace("""text_projection""" , 
"""text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: __snake_case : str = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: __snake_case : str = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": __snake_case : Tuple = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): __snake_case : Any = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): __snake_case : Union[str, Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): __snake_case : int = orig_state_dict.pop(_lowerCamelCase ) if "attn.in_proj" in key: __snake_case : List[str] = key.split(""".""" ) if key.startswith("""visual""" ): __snake_case : Optional[int] = key_split[3] __snake_case : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __snake_case : Optional[Any] = val[ :dim, : ] __snake_case : str = val[ dim : dim * 2, : ] __snake_case : List[str] = val[ -dim:, : ] else: __snake_case : Tuple = val[ :dim ] __snake_case : Any = val[ dim : dim * 2 ] __snake_case : str = val[ -dim: ] else: if "weight" in key: __snake_case : List[Any] = val[ :dim, : ] __snake_case : str = val[ dim : dim * 2, : ] __snake_case : List[Any] = val[ -dim:, : ] else: __snake_case : str = val[:dim] __snake_case : Optional[int] = val[ dim : dim * 2 ] __snake_case : Dict = val[-dim:] elif key.startswith("""mit""" ): __snake_case : int = key_split[2] __snake_case : Optional[Any] = config.vision_config.mit_hidden_size if "weight" in key: __snake_case : List[str] = val[:dim, :] __snake_case : Dict = val[dim : dim * 2, :] __snake_case : Optional[Any] = val[-dim:, :] else: __snake_case : Optional[Any] = val[:dim] __snake_case : Optional[Any] = val[dim : dim * 2] __snake_case : List[Any] = val[-dim:] else: __snake_case : Tuple = key_split[2] __snake_case : Union[str, Any] = config.text_config.hidden_size if "weight" in key: __snake_case : Union[str, Any] = val[:dim, :] __snake_case : Union[str, Any] = val[ dim : dim * 2, : ] __snake_case : Optional[Any] = val[-dim:, :] else: __snake_case : List[str] = val[:dim] __snake_case : Tuple = val[ dim : dim * 2 ] __snake_case : Any = val[-dim:] else: __snake_case : Optional[int] = rename_key(_lowerCamelCase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __snake_case : Dict = val.T __snake_case : List[Any] = val return orig_state_dict def _a ( _lowerCamelCase ) -> Optional[Any]: """simple docstring""" if num_frames == 8: __snake_case : Optional[Any] = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: __snake_case : Tuple = """eating_spaghetti.npy""" elif num_frames == 32: __snake_case : Union[str, Any] = """eating_spaghetti_32_frames.npy""" __snake_case : List[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=_lowerCamelCase , repo_type="""dataset""" , ) __snake_case : Dict = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False ) -> str: """simple docstring""" __snake_case : Optional[int] = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": 
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } __snake_case : Tuple = model_to_url[model_name] __snake_case : Dict = 8 if "16-frames" in model_name: __snake_case : List[Any] = 16 elif "shot" in model_name: __snake_case : Optional[int] = 32 __snake_case : List[Any] = get_xclip_config(_lowerCamelCase , _lowerCamelCase ) __snake_case : List[str] = XCLIPModel(_lowerCamelCase ) model.eval() if "drive" in checkpoint_url: __snake_case : int = """pytorch_model.bin""" gdown.cached_download(_lowerCamelCase , _lowerCamelCase , quiet=_lowerCamelCase ) __snake_case : List[str] = torch.load(_lowerCamelCase , map_location="""cpu""" )["""model"""] else: __snake_case : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCamelCase )["""model"""] __snake_case : str = convert_state_dict(_lowerCamelCase , _lowerCamelCase ) __snake_case : Optional[Any] = XCLIPModel(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] 
model.eval() __snake_case : List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 __snake_case : str = VideoMAEImageProcessor(size=_lowerCamelCase ) __snake_case : Optional[Any] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) __snake_case : int = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) __snake_case : Tuple = XCLIPProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase ) __snake_case : Tuple = prepare_video(_lowerCamelCase ) __snake_case : Optional[Any] = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=_lowerCamelCase , return_tensors="""pt""" , padding=_lowerCamelCase ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): __snake_case : Dict = model(**_lowerCamelCase ) # Verify outputs __snake_case : Tuple = outputs.logits_per_video __snake_case : int = logits_per_video.softmax(dim=1 ) print("""Probs:""" , _lowerCamelCase ) # kinetics-400 if model_name == "xclip-base-patch32": __snake_case : Optional[Any] = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] ) elif model_name == "xclip-base-patch32-16-frames": __snake_case : str = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] ) elif model_name == "xclip-base-patch16": __snake_case : Dict = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] ) elif model_name == "xclip-base-patch16-16-frames": __snake_case : str = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] ) elif model_name == "xclip-large-patch14": __snake_case : Union[str, Any] = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] ) elif model_name == "xclip-large-patch14-16-frames": __snake_case : Tuple = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __snake_case : Dict = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __snake_case : int = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __snake_case : Optional[int] = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __snake_case : str = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __snake_case : Tuple = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __snake_case : Optional[Any] = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __snake_case : Optional[Any] = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __snake_case : str = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __snake_case : Any = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __snake_case : Tuple = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __snake_case : Optional[Any] = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __snake_case : Dict = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) print("""Looks ok!""" ) if 
pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCamelCase ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(_lowerCamelCase , organization="""nielsr""" ) processor.push_to_hub(_lowerCamelCase , organization="""nielsr""" ) slow_tokenizer.push_to_hub(_lowerCamelCase , organization="""nielsr""" ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __UpperCamelCase = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
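A minimal programmatic sketch of the conversion entry point above. The definition in this row is name-mangled to _a, but the __main__ block calls it convert_xclip_checkpoint, which is the name assumed here; arguments are positional, mirroring that call, and the output directory is illustrative.

# Convert the fully supervised Kinetics-400 base checkpoint (8 frames, patch 32)
# and save it locally without pushing to the Hub (path illustrative).
convert_xclip_checkpoint(
    "xclip-base-patch32",    # model_name: selects checkpoint URL, patch size, frame count
    "./xclip-base-patch32",  # pytorch_dump_folder_path: where save_pretrained() writes
    False,                   # push_to_hub
)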
13
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class _A ( __lowercase ): lowercase__: str = '''codegen''' lowercase__: Optional[int] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int: """simple docstring""" __snake_case : List[str] = vocab_size __snake_case : Union[str, Any] = n_ctx __snake_case : int = n_positions __snake_case : str = n_embd __snake_case : Dict = n_layer __snake_case : List[Any] = n_head __snake_case : Any = n_inner __snake_case : str = rotary_dim __snake_case : List[str] = activation_function __snake_case : Tuple = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Tuple = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : Union[str, Any] = eos_token_id super().__init__( bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ ) class _A ( __lowercase ): def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , 
__magic_name__ : bool = False , ) -> Tuple: """simple docstring""" super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ ) if not getattr(self._config , """pad_token_id""" , __magic_name__ ): # TODO: how to do that better? __snake_case : List[str] = 0 @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return self._config.n_layer @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : Optional[int] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype __snake_case : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
13
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyImgaImgPipeline lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] lowercase__: int = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowercase__: List[Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: Any = False @property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return 32 @property def lowercase__ ( self : str ) -> str: """simple docstring""" return 32 @property def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __snake_case : Tuple = MultilingualCLIP(__magic_name__ ) __snake_case : Optional[Any] = text_encoder.eval() return text_encoder @property def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return { "block_out_channels": [32, 64], 
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Tuple = self.dummy_text_encoder __snake_case : Dict = self.dummy_tokenizer __snake_case : Dict = self.dummy_unet __snake_case : int = self.dummy_movq __snake_case : List[Any] = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case : Dict = DDIMScheduler(**__magic_name__ ) __snake_case : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str: """simple docstring""" __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Dict = """cpu""" __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : Any = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' 
expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __snake_case : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __snake_case : List[Any] = """A red cartoon frog, 4k""" __snake_case : str = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __snake_case : Any = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case , __snake_case : Optional[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : List[str] = pipeline( __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) __snake_case : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
13
1
'''Project Euler problem 48: find the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000.'''


def solution() -> str:
    """Return the last ten digits of sum(i**i for i in 1..1000)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
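Since only the last ten digits matter, the same result can be computed without ever building the full ~3000-digit integers by reducing modulo 10**10 at each step with three-argument pow(). A minimal sketch; the name solution_mod is illustrative and not part of the original script:

def solution_mod() -> str:
    """Same series as solution() above, reduced modulo 10**10 term by term."""
    mod = 10**10
    total = sum(pow(i, i, mod) for i in range(1, 1001)) % mod
    return str(total).zfill(10)  # zfill preserves any leading zeros in the last ten digits


if __name__ == "__main__":
    print(solution_mod())  # agrees with solution() above: '9110846700'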
13
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __UpperCamelCase = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __UpperCamelCase = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } class _A ( __lowercase ): lowercase__: Any = VOCAB_FILES_NAMES lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask'''] lowercase__: List[str] = BartTokenizer def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]: """simple docstring""" super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , 
errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , ) __snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) ) __snake_case : str = add_prefix_space __snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ ) __snake_case : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __snake_case : Any = """post_processor""" __snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) if tokenizer_component_instance: __snake_case : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __snake_case : Tuple = tuple(state["""sep"""] ) if "cls" in state: __snake_case : int = tuple(state["""cls"""] ) __snake_case : Optional[int] = False if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : Optional[Any] = add_prefix_space __snake_case : List[str] = True if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets: __snake_case : Optional[int] = trim_offsets __snake_case : Any = True if changes_to_apply: __snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) ) __snake_case : List[Any] = component_class(**__magic_name__ ) setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) @property def lowercase__ ( self : List[Any] ) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value __snake_case : Union[str, Any] = value def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding: """simple docstring""" __snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding: """simple docstring""" __snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) 
return tuple(__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Optional[int] = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
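The last two methods of the row above build BART's special-token layout for single sequences and pairs. The class name here is mangled, but it mirrors transformers' BartTokenizerFast, which this sanity-check sketch assumes (network access is needed to download facebook/bart-base):

from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
ids = tok("Hello world", "second segment")["input_ids"]
# BART formats sequence pairs as: <s> A </s> </s> B </s>
print(tok.convert_ids_to_tokens(ids))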
13
1
'''Decorator that marks a function as experimental and warns on every call.'''

import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Wrap ``fn`` so that each call emits a UserWarning about API instability."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
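A quick usage sketch for the decorator above; the function name beta_feature is illustrative:

@experimental
def beta_feature(x: int) -> int:
    """Double ``x``; calling it triggers the experimental-API warning."""
    return 2 * x


beta_feature(3)  # UserWarning: 'beta_feature' is experimental and might be subject to breaking changes ...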
13
'''Deduplicate identical initializers in an ONNX model to shrink its on-disk size.'''

import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewire one node (and the subgraphs of If/Loop nodes) to read ``new_name`` instead of ``name``."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicated initializers and point their consumers at the kept copy."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load an ONNX model, merge byte-identical initializers, and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT: 4 bytes per element
                    mem_size *= 4
                elif dtype == 6:  # INT32: 4 bytes per element
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE: 8 bytes per element
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model_file_name)
    onnx.save(model, new_model)
    return new_model
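A one-line usage sketch for the deduplication helper above; the .onnx path is illustrative:

optimized_path = remove_dup_initializers("exported/model.onnx")
print("deduplicated model written to", optimized_path)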
13
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
'''Convert original DialoGPT checkpoints to the weight layout expected by transformers.'''

import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Rename the LM-head key in a DialoGPT state dict and save it under WEIGHTS_NAME."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
13
1
'''simple docstring''' import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _A : def __init__( self : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str]=13 , __magic_name__ : List[str]=30 , __magic_name__ : int=2 , __magic_name__ : Any=3 , __magic_name__ : List[str]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=32 , __magic_name__ : Optional[int]=5 , __magic_name__ : List[str]=4 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Tuple=10 , __magic_name__ : Union[str, Any]=0.02 , __magic_name__ : str=None , __magic_name__ : Dict=2 , ) -> Optional[int]: """simple docstring""" __snake_case : Any = parent __snake_case : Dict = batch_size __snake_case : Optional[int] = image_size __snake_case : Tuple = patch_size __snake_case : str = num_channels __snake_case : Any = is_training __snake_case : List[Any] = use_labels __snake_case : int = hidden_size __snake_case : List[Any] = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : Dict = hidden_act __snake_case : Tuple = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : int = type_sequence_label_size __snake_case : int = initializer_range __snake_case : List[Any] = scope __snake_case : Any = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __snake_case : Dict = (image_size // patch_size) ** 2 __snake_case : Tuple = num_patches + 1 def lowercase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : str = None if self.use_labels: __snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = self.get_config() return config, pixel_values, labels def lowercase__ ( self : str ) -> List[str]: """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowercase__ ( self : int , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = ViTModel(config=__magic_name__ ) 
model.to(__magic_name__ ) model.eval() __snake_case : Dict = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Dict , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : Any ) -> int: """simple docstring""" __snake_case : Optional[Any] = ViTForMaskedImageModeling(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[str] = model(__magic_name__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __snake_case : Tuple = 1 __snake_case : Optional[Any] = ViTForMaskedImageModeling(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __snake_case : Any = model(__magic_name__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.type_sequence_label_size __snake_case : Optional[int] = ViTForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Any = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __snake_case : Dict = 1 __snake_case : Tuple = ViTForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __snake_case : List[Any] = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[Any] = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase__: int = ( {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification} if is_torch_available() else {} ) lowercase__: Dict = True lowercase__: str = False lowercase__: Optional[int] = False lowercase__: Tuple = False def lowercase__ ( self : Tuple ) -> List[Any]: """simple docstring""" __snake_case : List[str] = ViTModelTester(self ) __snake_case : Dict = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Optional[Any] ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : str = 
model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : int = model_class(__magic_name__ ) __snake_case : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Tuple = [*signature.parameters.keys()] __snake_case : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ ) def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = ViTModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def lowercase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(__magic_name__ ) __snake_case : int = self.default_image_processor __snake_case : List[str] = prepare_img() __snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : str = model(**__magic_name__ ) # verify the logits __snake_case : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : List[str] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) ) @slow def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(__magic_name__ ) __snake_case : Dict = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 ) __snake_case : List[Any] = prepare_img() __snake_case : Optional[int] = image_processor(images=__magic_name__ , return_tensors="""pt""" ) __snake_case : str = inputs.pixel_values.to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : List[str] = model(__magic_name__ , interpolate_pos_encoding=__magic_name__ ) # verify the logits __snake_case : Optional[Any] = torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , 
__magic_name__ ) __snake_case : Optional[int] = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Dict = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : List[str] = prepare_img() __snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ) __snake_case : Any = inputs.pixel_values.to(__magic_name__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): __snake_case : List[Any] = model(__magic_name__ )
13
'''Vigenère cipher: encrypt or decrypt a message with an alphabetic key.'''

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
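A small hand-checked example of the cipher above; the expected strings were worked out by hand from the letter table (H+K=R, e+E=i, l+Y=j, ...), so treat them as an assumption rather than an official test vector:

assert encrypt_message("KEY", "Hello, World!") == "Rijvs, Uyvjn!"
assert decrypt_message("KEY", "Rijvs, Uyvjn!") == "Hello, World!"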
13
1
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top-k/top-p filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                # batch 0: the 5 highest values sit at idx. 26 (9.38450987), 10 (8.43207833),
                # 0 (8.2220991), 25 (7.44020759) and 9 (7.35341276); their cumulative prob is <= 0.6
                [8.2220991, -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802,
                 -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, -9.85711836,
                 -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407,
                 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189,
                 -0.18771637, 7.44020759, 9.38450987, 2.12662941, -9.32562038, 2.35652522],
                # batch 1: the 5 highest values sit at idx. 27 (9.67702323), 17 (8.8275313),
                # 20 (7.38579536), 13 (6.27115922) and 18 (5.44029958); their cumulative prob is <= 0.6
                [0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551,
                 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762,
                 -8.88206726, 6.27115922, 2.28520723, 4.82767506, 4.30421368, 8.8275313,
                 5.44029958, -4.4735794, 7.38579536, -2.91051663, 2.61946077, -2.5674762,
                 -9.48959302, -4.02922645, -1.35416918, 9.67702323, -5.89478553, 1.85370467],
            ],
            dtype=tf.float32,
        )

        # expected non-filtered idx as noted above
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )

        # expected non-filtered values as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)


@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # exports a generate-serving tf.function with a fixed input length and checks it matches eager generate
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # exports a generate-serving tf.function with a fixed batch size and checks it matches eager generate
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # wraps tokenization + generation in a single Keras model and checks it can be saved
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: checks that unexpected model kwargs are filtered against the encoder signature
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call() accepts an extra "foo" argument
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
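# Run note (editor's addition): tests marked @slow are skipped by default under
# the transformers testing conventions; set RUN_SLOW=1 when invoking pytest to
# include them, e.g. (the tests/generation/ path assumes the file sits at its
# usual location, implied by the relative imports above):
#
#   RUN_SLOW=1 python -m pytest tests/generation/test_tf_utils.py -k "tf_function_export"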
"""Convert a SEW checkpoint from fairseq to the transformers format."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the attribute path and copy the fairseq tensor into the HF parameter
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the model's weights to the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
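# Invocation sketch (editor's addition): a typical fine-tuned conversion run,
# using only the flags defined by the argparse block above. The script filename
# and the paths are illustrative placeholders:
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-converted \
#       --is_finetuned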
"""
Project Euler Problem 551: https://projecteuler.net/problem=551

Consider the sequence with a(0) = 1 and a(n) = a(n - 1) + (sum of the digits of
a(n - 1)), whose opening terms are 1, 2, 4, 8, 16, 23, ... Find a(10**15) by
jumping over runs of terms whose high-order digits do not change.
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Updates a_i in place to either the n-th term or the smallest later term for
    which the low-order part c exceeds 10**k, writing terms as a(i) = b * 10**k + c.
    Returns the total increment and the number of terms jumped.
    """
    # ds_b: digit sum of b; c: the low-order k digits read back as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value of digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Computes terms one by one until the n-th term or until a carry passes 10**k."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10**k + c
    # ds_b -> digitsum(b), ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit array `digits` (little-endian), starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns the n-th term of the sequence."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
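# Cross-check sketch (editor's addition): a slow reference implementation of the
# same recurrence, handy for validating solution() on small n before trusting
# the memoised digit-jumping logic above for n = 10**15. Note the two may differ
# by one in indexing depending on whether a(0) or a(1) is taken as the first term.
def naive_solution(n: int) -> int:
    """Step the recurrence a <- a + digitsum(a) one term at a time."""
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# e.g. naive_solution(6) == 23, matching the opening terms 1, 2, 4, 8, 16, 23.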
"""
Project Euler Problem 44: https://projecteuler.net/problem=44

Pentagonal numbers are generated by P(n) = n * (3n - 1) / 2. Find the pair of
pentagonal numbers P(j) and P(k) for which their sum and difference are both
pentagonal and D = |P(k) - P(j)| is minimised; return D.
"""


def is_pentagonal(n: int) -> bool:
    """Returns True if n is pentagonal, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Searches pairs of pentagonal numbers below the limit and returns the first
    qualifying difference, or -1 if none is found within the limit."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
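# Quick check (editor's addition, doctest-style): the first pentagonal numbers
# are 1, 5, 12, 22, 35, so for example:
#
#   >>> is_pentagonal(22)
#   True
#   >>> is_pentagonal(23)
#   False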
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1 for every box)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class _A ( __lowercase ): lowercase__: str = '''codegen''' lowercase__: Optional[int] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int: """simple docstring""" __snake_case : List[str] = vocab_size __snake_case : Union[str, Any] = n_ctx __snake_case : int = n_positions __snake_case : str = n_embd __snake_case : Dict = n_layer __snake_case : List[Any] = n_head __snake_case : Any = n_inner __snake_case : str = rotary_dim __snake_case : List[str] = activation_function __snake_case : Tuple = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Tuple = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : Union[str, Any] = eos_token_id super().__init__( bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ ) class _A ( __lowercase ): def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , 
__magic_name__ : bool = False , ) -> Tuple: """simple docstring""" super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ ) if not getattr(self._config , """pad_token_id""" , __magic_name__ ): # TODO: how to do that better? __snake_case : List[str] = 0 @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return self._config.n_layer @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : Optional[int] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype __snake_case : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
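A minimal usage sketch for the ONNX config above, assuming the second class corresponds to transformers' CodeGenOnnxConfig (its upstream name; _A here is a renaming artifact) and that PyTorch is installed, which use_past=True requires; the tiny config values and the checkpoint are illustrative:

from transformers import AutoTokenizer, TensorType
from transformers.models.codegen.configuration_codegen import CodeGenConfig, CodeGenOnnxConfig

# tiny, illustrative hyperparameters
config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)

print(onnx_config.num_layers)    # mirrors config.n_layer
print(list(onnx_config.inputs))  # input_ids, past_key_values.*, attention_mask

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
# each past key/value tensor has shape (batch, n_head, seqlen + 2, n_embd // n_head)
print({name: tuple(t.shape) for name, t in dummy.items() if hasattr(t, "shape")})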
13
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2]
        # to this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that no queen in the current board shares this column,
        # because that would be a vertical collision. Then we apply the two
        # diagonal formulas:
        #
        #   45º:  y - x = b   or   row - col = b
        #   135º: y + x = b   or   row + col = b
        #
        # and verify that neither value already exists in its collision list
        # (diagonal_right_collisions, diagonal_left_collisions). If any of these
        # checks is True there is a collision, so we continue to the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If not, we recurse with the updated state
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
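A quick check of the two diagonal invariants and of the classic solution counts, using only the functions defined above:

# Queens at (0, 1) and (2, 3) share a 45º diagonal: row - col == -1 for both.
# Queens at (0, 3) and (2, 1) share a 135º diagonal: row + col == 3 for both.
for row, col in [(0, 1), (2, 3)]:
    print(f"({row}, {col}): row - col = {row - col}")
for row, col in [(0, 3), (2, 1)]:
    print(f"({row}, {col}): row + col = {row + col}")

# The solver reproduces the classic counts: 2 solutions for n=4, 92 for n=8.
boards: list[list[str]] = []
depth_first_search([], [], [], boards, 8)
assert len(boards) == 92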
13
1
'''simple docstring''' from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __UpperCamelCase = TypeVar("T") def _a ( _lowerCamelCase ) -> int: """simple docstring""" return (position - 1) // 2 def _a ( _lowerCamelCase ) -> int: """simple docstring""" return (2 * position) + 1 def _a ( _lowerCamelCase ) -> int: """simple docstring""" return (2 * position) + 2 class _A ( Generic[T] ): def __init__( self : Any ) -> None: """simple docstring""" __snake_case : list[tuple[T, int]] = [] __snake_case : dict[T, int] = {} __snake_case : int = 0 def __len__( self : Any ) -> int: """simple docstring""" return self.elements def __repr__( self : Tuple ) -> str: """simple docstring""" return str(self.heap ) def lowercase__ ( self : Optional[int] ) -> bool: """simple docstring""" return self.elements == 0 def lowercase__ ( self : Any , __magic_name__ : T , __magic_name__ : int ) -> None: """simple docstring""" self.heap.append((elem, weight) ) __snake_case : List[str] = self.elements self.elements += 1 self._bubble_up(__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> T: """simple docstring""" if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __snake_case , __snake_case : int = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __snake_case , __snake_case : Dict = self.heap[0] self._bubble_down(__magic_name__ ) return elem def lowercase__ ( self : Dict , __magic_name__ : T , __magic_name__ : int ) -> None: """simple docstring""" __snake_case : List[Any] = self.position_map[elem] __snake_case : Union[str, Any] = (elem, weight) if position > 0: __snake_case : List[Any] = get_parent_position(__magic_name__ ) __snake_case , __snake_case : str = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__magic_name__ ) else: self._bubble_down(__magic_name__ ) else: self._bubble_down(__magic_name__ ) def lowercase__ ( self : int , __magic_name__ : T ) -> None: """simple docstring""" __snake_case : Tuple = self.position_map[elem] if curr_pos == 0: return None __snake_case : Optional[Any] = get_parent_position(__magic_name__ ) __snake_case , __snake_case : Any = self.heap[curr_pos] __snake_case , __snake_case : Union[str, Any] = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__magic_name__ , __magic_name__ ) return self._bubble_up(__magic_name__ ) return None def lowercase__ ( self : List[str] , __magic_name__ : T ) -> None: """simple docstring""" __snake_case : Optional[int] = self.position_map[elem] __snake_case , __snake_case : List[str] = self.heap[curr_pos] __snake_case : List[Any] = get_child_left_position(__magic_name__ ) __snake_case : Union[str, Any] = get_child_right_position(__magic_name__ ) if child_left_position < self.elements and child_right_position < self.elements: __snake_case , __snake_case : str = self.heap[child_left_position] __snake_case , __snake_case : List[Any] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__magic_name__ , __magic_name__ ) return self._bubble_down(__magic_name__ ) if child_left_position < self.elements: __snake_case , __snake_case : str = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__magic_name__ , __magic_name__ ) return self._bubble_down(__magic_name__ ) else: return None if child_right_position < self.elements: __snake_case , __snake_case : Optional[int] = self.heap[child_right_position] if child_right_weight < weight: 
self._swap_nodes(__magic_name__ , __magic_name__ ) return self._bubble_down(__magic_name__ ) return None def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" __snake_case : str = self.heap[nodea_pos][0] __snake_case : Tuple = self.heap[nodea_pos][0] __snake_case , __snake_case : List[str] = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __snake_case : str = nodea_pos __snake_case : Any = nodea_pos class _A ( Generic[T] ): def __init__( self : Optional[Any] ) -> None: """simple docstring""" __snake_case : dict[T, dict[T, int]] = {} __snake_case : int = 0 def __repr__( self : Union[str, Any] ) -> str: """simple docstring""" return str(self.connections ) def __len__( self : Any ) -> int: """simple docstring""" return self.nodes def lowercase__ ( self : Optional[int] , __magic_name__ : T ) -> None: """simple docstring""" if node not in self.connections: __snake_case : str = {} self.nodes += 1 def lowercase__ ( self : int , __magic_name__ : T , __magic_name__ : T , __magic_name__ : int ) -> None: """simple docstring""" self.add_node(__magic_name__ ) self.add_node(__magic_name__ ) __snake_case : int = weight __snake_case : str = weight def _a ( _lowerCamelCase , ) -> tuple[dict[T, int], dict[T, T | None]]: """simple docstring""" __snake_case : dict[T, int] = {node: maxsize for node in graph.connections} __snake_case : dict[T, T | None] = {node: None for node in graph.connections} __snake_case : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_lowerCamelCase , _lowerCamelCase ) if priority_queue.is_empty(): return dist, parent # initialization __snake_case : List[str] = priority_queue.extract_min() __snake_case : List[str] = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __snake_case : Dict = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_lowerCamelCase , dist[neighbour] ) __snake_case : List[str] = node # running prim's algorithm while not priority_queue.is_empty(): __snake_case : Tuple = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __snake_case : Tuple = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_lowerCamelCase , dist[neighbour] ) __snake_case : Optional[Any] = node return dist, parent
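A minimal usage sketch. MinPriorityQueue is still visible in the annotations above; GraphUndirectedWeighted, add_edge, and prims_algo are assumed here as the upstream names of the graph class, its edge-insertion method, and the final function:

graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("a", "c", 15)
graph.add_edge("b", "d", 100)

dist, parent = prims_algo(graph)

# with the cumulative dist update used above, this run yields:
print(dist)    # {'a': 0, 'b': 3, 'c': 13, 'd': 103}
print(parent)  # {'a': None, 'b': 'a', 'c': 'b', 'd': 'b'}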
13
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCamelCase = logging.getLogger(__name__) class _A ( __lowercase ): def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" super().__init__( __magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , ) __snake_case : List[str] = None def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]: """simple docstring""" logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __snake_case : List[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __snake_case : List[str] = str(distributed_port + 1 ) __snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowercase__ ( self : int ) -> int: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ ) dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group ) return target_tensor def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : int = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ ) return ifname def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): __snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ ) # distributed training __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group ) # gather logic __snake_case : Tuple = None if self._is_main(): __snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )] dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group ) # scatter logic __snake_case : Optional[int] = question_hidden_states.shape[0] __snake_case : Optional[Any] = [] __snake_case : Any = [] if self._is_main(): assert len(__magic_name__ ) == world_size __snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ ) __snake_case , __snake_case : Tuple = 
torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa ) __snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
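The gather-then-scatter bookkeeping above is easier to see with local tensor ops. A single-process sketch with torch.chunk standing in for the retriever's _chunk_tensor helper and plain tensors standing in for dist.gather/dist.scatter; all sizes are illustrative:

import torch

world_size, n_queries, n_docs, dim = 2, 3, 4, 5

# main worker: question hidden states gathered from every worker
gathered = [torch.randn(n_queries, dim) for _ in range(world_size)]
all_states = torch.cat(gathered)  # (world_size * n_queries, dim)

# pretend the index returned ids and embeddings for every gathered query
doc_ids = torch.randint(0, 100, (world_size * n_queries, n_docs))
doc_embeds = torch.randn(world_size * n_queries, n_docs, dim)

# split per worker: the shapes each worker receives from dist.scatter
ids_per_worker = list(doc_ids.chunk(world_size))
embeds_per_worker = list(doc_embeds.chunk(world_size))
print(ids_per_worker[0].shape)     # torch.Size([3, 4])
print(embeds_per_worker[0].shape)  # torch.Size([3, 4, 5])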
13
1
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    # phi[i] starts at i - 1; the sieve below turns it into Euler's totient
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
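A worked example on a small limit makes the sieve easy to verify by hand:

# For limit = 8 the sieve yields phi(2..8) = 1, 2, 2, 4, 2, 6, 4, whose sum 21
# is the number of reduced proper fractions with denominator <= 8 (the example
# given in Project Euler problem 72).
assert solution(8) == 21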
13
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __UpperCamelCase = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$") @total_ordering @dataclass class _A : lowercase__: str lowercase__: Optional[str] = None lowercase__: Optional[Union[str, int]] = None lowercase__: Optional[Union[str, int]] = None lowercase__: Optional[Union[str, int]] = None def lowercase__ ( self : str ) -> List[str]: """simple docstring""" __snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str ) def __repr__( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" return self.major, self.minor, self.patch def lowercase__ ( self : Any , __magic_name__ : Dict ) -> Optional[int]: """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): return Version(__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): return other raise TypeError(f'''{other} (type {type(__magic_name__ )}) cannot be compared to version.''' ) def __eq__( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> List[Any]: """simple docstring""" try: __snake_case : Union[str, Any] = self._validate_operand(__magic_name__ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = self._validate_operand(__magic_name__ ) return self.tuple < other.tuple def __hash__( self : Any ) -> Any: """simple docstring""" return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowercase__ ( cls : List[str] , __magic_name__ : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowercase__ ( self : str ) -> str: """simple docstring""" return self.version_str def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : List[Any] = _VERSION_REG.match(_lowerCamelCase ) if not res: raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(_lowerCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" return ".".join(str(_lowerCamelCase ) for v in version_tuple )
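A minimal usage sketch, assuming the dataclass above is the Version helper from the datasets library (its upstream shape), with version_str as the first field and the first method acting as __post_init__:

v1 = Version("1.0.0")
v2 = Version("2.1.0")

print(repr(v1))       # 1.0.0
print(v1.tuple)       # (1, 0, 0)
print(v1 < v2)        # True: versions compare by (major, minor, patch) tuples
print(v1 == "1.0.0")  # True: strings are coerced through _validate_operand

try:
    Version("1.0")
except ValueError as err:
    print(err)        # Format should be x.y.z with {x,y,z} being digits.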
13
1
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _A ( __lowercase ): lowercase__: UNetaDModel lowercase__: KarrasVeScheduler def __init__( self : Tuple , __magic_name__ : UNetaDModel , __magic_name__ : KarrasVeScheduler ) -> Union[str, Any]: """simple docstring""" super().__init__() self.register_modules(unet=__magic_name__ , scheduler=__magic_name__ ) @torch.no_grad() def __call__( self : List[Any] , __magic_name__ : int = 1 , __magic_name__ : int = 50 , __magic_name__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , **__magic_name__ : Dict , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" __snake_case : Optional[Any] = self.unet.config.sample_size __snake_case : Union[str, Any] = (batch_size, 3, img_size, img_size) __snake_case : str = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) __snake_case : Optional[int] = randn_tensor(__magic_name__ , generator=__magic_name__ , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__magic_name__ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper __snake_case : str = self.scheduler.schedule[t] __snake_case : Optional[int] = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat __snake_case , __snake_case : List[str] = self.scheduler.add_noise_to_input(__magic_name__ , __magic_name__ , generator=__magic_name__ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. __snake_case : str = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev __snake_case : int = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. __snake_case : Tuple = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample __snake_case : Dict = self.scheduler.step_correct( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , step_output.prev_sample , step_output["""derivative"""] , ) __snake_case : Union[str, Any] = step_output.prev_sample __snake_case : Union[str, Any] = (sample / 2 + 0.5).clamp(0 , 1 ) __snake_case : List[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __snake_case : str = self.numpy_to_pil(__magic_name__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=__magic_name__ )
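A minimal end-to-end sketch, assuming the class above is diffusers' KarrasVePipeline (its upstream name). The UNet is untrained, so the output is noise; this only demonstrates the plumbing:

import torch
from diffusers import KarrasVeScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)  # untrained toy model
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)  # the pipeline class defined above

generator = torch.Generator().manual_seed(0)
result = pipe(batch_size=1, num_inference_steps=5, generator=generator)
print(len(result.images), result.images[0].size)  # 1 (32, 32)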
13
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    """simple docstring"""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad with zeros until the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # each group of 3 bits yields one octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
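Worked examples that can be checked by hand:

# "1111" is left-padded to "001111" and grouped as 001|111 -> "17"
# (0b1111 == 15 == 0o17): each 3-bit group maps to one octal digit.
assert bin_to_octal("110") == "6"
assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101") == "525"  # groups 101|010|101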
13
1
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """simple docstring"""
    length = len(collection)
    for i in range(length - 1):
        least = i
        # find the index of the smallest element in the unsorted suffix
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap it into position i
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
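A quick demonstration; selection sort always performs O(n^2) comparisons, uses O(1) extra space, and sorts in place:

assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]
assert selection_sort([]) == []
assert selection_sort([3, 1, 2, 1]) == [1, 1, 2, 3]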
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __UpperCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __UpperCamelCase = TaTokenizerFast __UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __UpperCamelCase = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
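A small sketch of how this lazy-import structure behaves at runtime, assuming transformers (with torch installed for the model class) is available; attribute access is what triggers the real import:

import importlib

mt5 = importlib.import_module("transformers.models.mt5")
print(type(mt5).__name__)  # _LazyModule: attributes resolve on demand

model_cls = mt5.MT5Model   # first access performs the real, torch-backed import
print(model_cls.__name__)  # MT5Model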
13
1
'''simple docstring'''
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    def __init__(self) -> None:
        """simple docstring"""
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        """simple docstring"""
        self.connections[node] = {}

    def add_transition_probability(
        self, node1: str, node2: str, probability: float
    ) -> None:
        """simple docstring"""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        """simple docstring"""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """simple docstring"""
        current_probability = 0.0
        random_value = random()
        # walk the outgoing edges until the cumulative probability
        # exceeds the random draw
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
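A usage sketch with a three-node transition matrix; each row of outgoing probabilities sums to 1, and visit counts over many steps approximate the chain's stationary distribution:

transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.075),
    ("a", "c", 0.025),
    ("b", "a", 0.15),
    ("b", "b", 0.8),
    ("b", "c", 0.05),
    ("c", "a", 0.25),
    ("c", "b", 0.25),
    ("c", "c", 0.5),
]
visited = get_transitions("a", transitions, 5000)
# 'a' has the strongest self-loop, so it should top the counts
print(visited.most_common())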
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __snake_case : Tuple = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""] __snake_case : Any = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. __snake_case : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
13
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : Tuple = value elif weight_type == "bias": __snake_case : str = value else: __snake_case : List[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [] __snake_case : List[Any] = fairseq_model.state_dict() __snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Any = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __snake_case : Dict = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2] __snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase ) if "weight_g" in name: __snake_case : Dict = """weight_g""" elif "weight_v" in name: __snake_case : List[str] = """weight_v""" elif "weight" in name: __snake_case : str = """weight""" elif "bias" in name: __snake_case : int = """bias""" else: __snake_case : int = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = full_name.split("""conv_layers.""" )[-1] __snake_case : Optional[int] = name.split(""".""" ) __snake_case : Dict = int(items[0] ) __snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : List[str] = SEWConfig() if is_finetuned: __snake_case : List[Any] = model.wav_encoder.wav_model.cfg else: __snake_case : Optional[Any] = model.cfg __snake_case : Tuple = fs_config.conv_bias __snake_case : List[Any] = eval(fs_config.conv_feature_layers ) __snake_case : List[Any] = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : Tuple = [x[2] for x in conv_layers] __snake_case : List[str] = """gelu""" __snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" __snake_case : Optional[int] = 0.0 __snake_case : Optional[Any] = fs_config.activation_fn.name __snake_case : Dict = fs_config.encoder_embed_dim __snake_case : Dict = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Tuple = 1E-5 __snake_case : Dict = fs_config.encoder_layerdrop __snake_case : Any = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Tuple = fs_config.conv_pos __snake_case : Optional[int] = len(_lowerCamelCase ) __snake_case : int = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : Union[str, Any] = model.cfg __snake_case : Tuple = fs_config.final_dropout __snake_case : Tuple = fs_config.layerdrop __snake_case : Any = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : List[Any] = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : str = fs_config.mask_channel_length __snake_case : Any = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : str = fs_config.mask_prob __snake_case : str = """Wav2Vec2FeatureExtractor""" __snake_case : Dict = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase ) else: __snake_case : int = convert_config(model[0] , _lowerCamelCase ) __snake_case : Dict = model[0].eval() __snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __snake_case : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , 
do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if is_finetuned: if dict_path: __snake_case : str = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Optional[Any] = target_dict.bos_index __snake_case : Tuple = target_dict.pad_index __snake_case : List[str] = target_dict.bos_index __snake_case : Optional[Any] = target_dict.eos_index __snake_case : List[str] = len(target_dict.symbols ) __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" ) if not os.path.isdir(_lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case : List[str] = SEWForCTC(_lowerCamelCase ) else: __snake_case : List[str] = SEWModel(_lowerCamelCase ) feature_extractor.save_pretrained(_lowerCamelCase ) recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __UpperCamelCase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
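The trickiest step in the conversion above is rewriting fairseq parameter names through MAPPING, where "*" stands for the layer index. A minimal sketch of that substitution, with one entry from MAPPING and an illustrative parameter name:

mapping = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}  # one entry from MAPPING
name = "encoder.layers.3.self_attn.k_proj.weight"  # illustrative fairseq parameter

for key, mapped_key in mapping.items():
    if key in name:
        # the token two dots before the matched key is the layer index
        layer_index = name.split(key)[0].split(".")[-2]
        hf_name = "sew." + mapped_key.replace("*", layer_index)
        print(hf_name)  # sew.encoder.layers.3.attention.k_proj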
13
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
13
1
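The loss test above feeds labels positionally by mapping each label key onto its index in the model call signature. A standalone sketch of that mapping trick (the function and names here are illustrative, not part of the test suite):

import inspect


def labels_as_positional(call_fn, prepared: dict) -> tuple:
    # Order every prepared value by its position in the call signature,
    # falling back to each parameter's declared default when nothing was prepared.
    ordered = []
    for name, param in inspect.signature(call_fn).parameters.items():
        if name != "kwargs":
            ordered.append(prepared.get(name, param.default))
    return tuple(ordered)


def call(input_ids=None, attention_mask=None, labels=None):
    return input_ids, attention_mask, labels


print(labels_as_positional(call, {"input_ids": [1, 2], "labels": [0]}))
# ([1, 2], None, [0]) -- attention_mask keeps its default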
'''simple docstring'''


def text_justification(word: str, max_width: int) -> list:
    """simple docstring"""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
13
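For reference, a quick usage example of the justification routine above; each returned line (shown in comments) is padded to exactly the requested width:

for line in text_justification("This is an example of text justification.", 16):
    print(repr(line))
# 'This    is    an'
# 'example  of text'
# 'justification.  '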
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
13
1
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __UpperCamelCase = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class _A ( unittest.TestCase ): def lowercase__ ( self : int , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> List[str]: """simple docstring""" __snake_case : Tuple = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )] if identifier is not None: __snake_case : Tuple = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__magic_name__ , __magic_name__ ): for n_ in n_identifier: __snake_case : List[Any] = [file for file in files if n_ not in file] else: __snake_case : Union[str, Any] = [file for file in files if n_identifier not in file] __snake_case : Any = ignore_files or [] ignore_files.append("""__init__.py""" ) __snake_case : Union[str, Any] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , __magic_name__ ) if only_modules: __snake_case : Dict = file.split(""".""" )[0] try: __snake_case : List[str] = getattr(__magic_name__ , __magic_name__ ) __snake_case : List[Any] = doctest.DocTestSuite(__magic_name__ ) __snake_case : Tuple = unittest.TextTestRunner().run(__magic_name__ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: __snake_case : Optional[Any] = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowercase__ ( self : List[Any] ) -> int: """simple docstring""" __snake_case : List[str] = Path("""src/transformers""" ) __snake_case : List[str] = """modeling""" __snake_case : List[str] = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : List[str] = Path("""src/transformers""" ) __snake_case : List[Any] = """tokenization""" self.analyze_directory(__magic_name__ , identifier=__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case : str = Path("""src/transformers""" ) __snake_case : Optional[int] = """configuration""" self.analyze_directory(__magic_name__ , identifier=__magic_name__ ) def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" __snake_case : Dict = Path("""src/transformers""" ) __snake_case : List[Any] = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ ) def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Any = Path("""docs/source""" ) __snake_case : Optional[int] = ["""favicon.ico"""] self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
13
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
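The __init__ files in this section follow transformers' lazy-import convention: nothing heavy is imported until an attribute is first accessed. A stripped-down sketch of the idea (simplified, not the real _LazyModule):

import importlib
import types


class LazyModule(types.ModuleType):
    """Module proxy that defers submodule imports until attribute access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [names]} into {name: submodule} for O(1) lookup.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)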
'''simple docstring'''


def hexagonal_numbers(length: int) -> list[int]:
    """simple docstring"""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
13
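A quick sanity check on the closed form above: every hexagonal number n(2n - 1) is an odd-indexed triangular number, since T(2n - 1) = (2n - 1)(2n)/2 = n(2n - 1). Illustrative check:

def triangular(k: int) -> int:
    return k * (k + 1) // 2


assert all(n * (2 * n - 1) == triangular(2 * n - 1) for n in range(1, 100))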
'''simple docstring'''


def count_inversions_bf(arr):
    """simple docstring"""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """simple docstring"""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """simple docstring"""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
13
1
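A short randomized cross-check of the two inversion counters above, comparing the O(n^2) brute force against the O(n log n) divide-and-conquer version (illustrative):

import random

arr = [random.randint(0, 100) for _ in range(200)]
brute = count_inversions_bf(arr)
_, fast = count_inversions_recursive(arr)
assert brute == fast
print("inversions:", brute)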
'''simple docstring'''
import logging

from transformers import PretrainedConfig

logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
13
'''simple docstring'''
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
13
1
'''simple docstring'''
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """simple docstring"""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """simple docstring"""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
13
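Example usage of the ternary search above on a sorted list; once the window drops below the precision threshold it falls back to lin_search (values illustrative):

data = [3 * i for i in range(34)]  # sorted: 0, 3, 6, ..., 99
print(ite_ternary_search(data, 36))                    # 12
print(rec_ternary_search(0, len(data) - 1, data, 36))  # 12
print(ite_ternary_search([1, 3, 5], 4))                # -1 (handled by lin_search)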
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
13
1
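The CANINE tests above mint special tokens from Unicode private-use-area codepoints, which can never collide with real input text because CANINE tokenizes raw codepoints. A minimal illustration:

special_token = chr(0xE005)  # U+E000-U+F8FF is the BMP private use area
assert 0xE000 <= ord(special_token) <= 0xF8FF
print(f"special token codepoint: U+{ord(special_token):04X}")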
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        """simple docstring"""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
13
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
13
1
'''simple docstring'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
13
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class _A ( __lowercase ): lowercase__: str = '''codegen''' lowercase__: Optional[int] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int: """simple docstring""" __snake_case : List[str] = vocab_size __snake_case : Union[str, Any] = n_ctx __snake_case : int = n_positions __snake_case : str = n_embd __snake_case : Dict = n_layer __snake_case : List[Any] = n_head __snake_case : Any = n_inner __snake_case : str = rotary_dim __snake_case : List[str] = activation_function __snake_case : Tuple = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Tuple = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : Union[str, Any] = eos_token_id super().__init__( bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ ) class _A ( __lowercase ): def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , 
__magic_name__ : bool = False , ) -> Tuple: """simple docstring""" super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ ) if not getattr(self._config , """pad_token_id""" , __magic_name__ ): # TODO: how to do that better? __snake_case : List[str] = 0 @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return self._config.n_layer @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : Optional[int] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype __snake_case : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
13
1
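The CodeGen ONNX config above builds zeroed past key/value tensors shaped (batch, num_heads, past_length, head_dim) for every layer. A standalone sketch of that shape logic (the concrete numbers are illustrative, taken from the config defaults):

import torch

batch, seqlen = 2, 8
num_layers, num_heads, hidden_size = 28, 16, 4096
past_length = seqlen + 2  # the config deliberately uses a different length for the past
shape = (batch, num_heads, past_length, hidden_size // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 10, 256])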
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyImgaImgPipeline lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] lowercase__: int = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowercase__: List[Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: Any = False @property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return 32 @property def lowercase__ ( self : str ) -> str: """simple docstring""" return 32 @property def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __snake_case : Tuple = MultilingualCLIP(__magic_name__ ) __snake_case : Optional[Any] = text_encoder.eval() return text_encoder @property def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return { "block_out_channels": [32, 64], 
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Tuple = self.dummy_text_encoder __snake_case : Dict = self.dummy_tokenizer __snake_case : Dict = self.dummy_unet __snake_case : int = self.dummy_movq __snake_case : List[Any] = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case : Dict = DDIMScheduler(**__magic_name__ ) __snake_case : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str: """simple docstring""" __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Dict = """cpu""" __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : Any = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' 
expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __snake_case : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __snake_case : List[Any] = """A red cartoon frog, 4k""" __snake_case : str = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __snake_case : Any = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case , __snake_case : Optional[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : List[str] = pipeline( __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) __snake_case : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
13
1
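For context, the prior-plus-img2img flow exercised by the slow test above can be run directly. A minimal sketch, assuming GPU and network access to the two checkpoints, and using the public diffusers class name KandinskyImg2ImgPipeline (mangled in the sample above):

import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
generator = torch.Generator(device="cpu").manual_seed(0)
# The prior turns the text prompt into CLIP image embeddings for the decoder.
image_embeds, negative_image_embeds = prior(
    "A red cartoon frog, 4k", generator=generator, num_inference_steps=5, negative_prompt=""
).to_tuple()

# strength controls how far the init image is pushed toward the prompt
# (0.2 preserves most of the original, mirroring the test above).
image = pipe(
    "A red cartoon frog, 4k",
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    generator=generator,
    num_inference_steps=100,
    height=768,
    width=768,
    strength=0.2,
).images[0]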
'''simple docstring'''

import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
13
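A brief usage sketch for the tokenizer above (requires the jieba and sentencepiece extras); the checkpoint name comes from the pretrained map in the sample, and the output is indicative rather than exact:

from transformers import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("今天天气真好")
print(tokenizer.convert_ids_to_tokens(ids))
# `_decode` above strips the sentencepiece spaces and restores "\n" from the
# placeholder characters installed by `self.translator`.
print(tokenizer.decode(ids))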
'''simple docstring'''

import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
13
1
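A small sketch of the special-token layout the class above produces, assuming network access to the facebook/bart-base checkpoint; the printed tokens are indicative:

from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
# BART wraps a single sequence as <s> ... </s>, and a pair as <s> A </s></s> B </s>.
ids = tok.build_inputs_with_special_tokens(tok.encode("hello", add_special_tokens=False))
print(tok.convert_ids_to_tokens(ids))  # roughly ['<s>', 'hello', '</s>']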
'''simple docstring'''

import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to the last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
13
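A minimal illustration of the alignment helper the tests above exercise, with hypothetical stage names; given the stage list, either side (features or indices) can be derived from the other:

from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["stem", "stage1", "stage2", "stage3"]
features, indices = get_aligned_output_features_output_indices(None, None, stage_names)
print(features, indices)  # ['stage3'] [3], defaults to the last stage
features, indices = get_aligned_output_features_output_indices(None, [1, 3], stage_names)
print(features, indices)  # ['stage1', 'stage3'] [1, 3]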
'''simple docstring'''

import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two initializers ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from the model to reduce its size.
    Writes the optimized model next to the original with an 'optimized_' prefix.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
13
1
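A hypothetical invocation of the deduplication helper above (the input path is a placeholder):

new_path = remove_dup_initializers("exported/model.onnx")
print("deduplicated model written to", new_path)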
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : def __init__( self : Dict , __magic_name__ : Optional[int] , __magic_name__ : int=13 , __magic_name__ : Dict=32 , __magic_name__ : List[str]=2 , __magic_name__ : int=3 , __magic_name__ : int=16 , __magic_name__ : str=[32, 64, 1_28] , __magic_name__ : Union[str, Any]=[1, 2, 1] , __magic_name__ : Any=[2, 2, 4] , __magic_name__ : List[Any]=2 , __magic_name__ : List[str]=2.0 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : int="gelu" , __magic_name__ : int=False , __magic_name__ : int=True , __magic_name__ : Union[str, Any]=0.02 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : Any=True , __magic_name__ : int=None , __magic_name__ : str=True , __magic_name__ : int=10 , __magic_name__ : List[str]=8 , __magic_name__ : Optional[Any]=["stage1", "stage2"] , __magic_name__ : Tuple=[1, 2] , ) -> Union[str, Any]: """simple docstring""" __snake_case : Union[str, Any] = parent __snake_case : str = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[str] = patch_size __snake_case : Tuple = num_channels __snake_case : Optional[Any] = embed_dim __snake_case : str = hidden_sizes __snake_case : str = depths __snake_case : str = num_heads __snake_case : Dict = window_size __snake_case : List[str] = mlp_ratio __snake_case : Dict = qkv_bias __snake_case : Tuple = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : List[Any] = drop_path_rate __snake_case : List[str] = hidden_act __snake_case : Union[str, Any] = use_absolute_embeddings __snake_case : Tuple = patch_norm __snake_case : Union[str, Any] = layer_norm_eps __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = is_training __snake_case : Union[str, Any] = scope __snake_case : Union[str, Any] = use_labels __snake_case : Any = type_sequence_label_size __snake_case : Any = encoder_stride __snake_case : List[str] = out_features __snake_case : Optional[Any] = out_indices def lowercase__ ( self : int ) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Optional[int] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : List[Any] = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , 
num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : int ) -> Dict: """simple docstring""" __snake_case : Dict = FocalNetModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Union[str, Any] = model(__magic_name__ ) __snake_case : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __snake_case : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Dict ) -> Any: """simple docstring""" __snake_case : str = FocalNetBackbone(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : str = model(__magic_name__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None __snake_case : Optional[Any] = None __snake_case : str = FocalNetBackbone(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowercase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] ) -> Optional[int]: """simple docstring""" __snake_case : Any = FocalNetForMaskedImageModeling(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __snake_case : List[Any] = 1 __snake_case : Tuple = FocalNetForMaskedImageModeling(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __snake_case : Union[str, Any] = model(__magic_name__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> int: """simple docstring""" __snake_case : Union[str, Any] = 
self.type_sequence_label_size __snake_case : Optional[int] = FocalNetForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Dict = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __snake_case : Optional[int] = 1 __snake_case : Optional[Any] = FocalNetForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __snake_case : Any = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: int = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowercase__: List[str] = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) lowercase__: int = False lowercase__: str = False lowercase__: List[Any] = False lowercase__: Optional[int] = False lowercase__: Optional[Any] = False def lowercase__ ( self : int ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = FocalNetModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=__magic_name__ , embed_dim=37 , has_text_modality=__magic_name__ ) def lowercase__ ( self : str ) -> List[Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" return def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Any: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def lowercase__ ( self : Any ) -> int: """simple docstring""" pass def lowercase__ ( self : List[str] 
) -> Optional[Any]: """simple docstring""" __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: __snake_case : Any = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: __snake_case : Optional[int] = model_class(__magic_name__ ) __snake_case : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int ) -> Any: """simple docstring""" __snake_case : Optional[int] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Any = outputs.hidden_states __snake_case : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__magic_name__ ) , __magic_name__ ) # FocalNet has a different seq_length __snake_case : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __snake_case : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __snake_case : Any = outputs.reshaped_hidden_states self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case , __snake_case , __snake_case , __snake_case : Dict = reshaped_hidden_states[0].shape __snake_case : List[str] = ( reshaped_hidden_states[0].view(__magic_name__ , __magic_name__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase__ ( self : Any ) -> List[Any]: """simple docstring""" __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: __snake_case : Tuple = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : int = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Dict ) -> str: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = 3 __snake_case : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , 
collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __snake_case : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __snake_case : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __snake_case : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: __snake_case : List[Any] = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Optional[int] = True self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) ) @slow def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : List[Any] = FocalNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> str: """simple docstring""" __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = _config_zero_init(__magic_name__ ) for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(config=__magic_name__ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def lowercase__ ( self : Any ) -> Tuple: """simple docstring""" __snake_case : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__magic_name__ ) __snake_case : Optional[int] = self.default_image_processor __snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : int = model(**__magic_name__ ) # verify the logits __snake_case : Union[str, Any] = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Optional[int] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 ) @require_torch class _A ( __lowercase , unittest.TestCase ): lowercase__: int = (FocalNetBackbone,) if is_torch_available() else () lowercase__: str = FocalNetConfig lowercase__: Dict = False def lowercase__ ( self : List[str] ) -> Dict: """simple docstring""" __snake_case : Optional[Any] = FocalNetModelTester(self )
13
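A condensed sketch of the integration check above, assuming network access to the microsoft/focalnet-tiny checkpoint and the local COCO fixture image used by the test:

import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# The test above expects class id 281 (an ImageNet cat class) for this image.
print(model.config.id2label[logits.argmax(-1).item()])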
'''simple docstring'''

import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # The original checkpoints store the LM head under a decoder-style key;
    # rename it to the key transformers expects.
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
13
1
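A hypothetical direct call to the converter above; the checkpoint path is a placeholder for wherever the original DialoGPT *_ft.pkl files were downloaded:

convert_dialogpt_checkpoint("checkpoints/small_ft.pkl", "./DialoGPT-small")
# ./DialoGPT-small now holds a weights file with `lm_head.weight` renamed.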
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
13
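What the stub above buys in practice, sketched under the assumption that the module lives at diffusers.utils.dummy_transformers_and_torch_and_note_seq_objects: any use raises an ImportError naming the missing backends instead of an opaque AttributeError.

from diffusers.utils import dummy_transformers_and_torch_and_note_seq_objects as dummies

try:
    dummies.SpectrogramDiffusionPipeline()
except ImportError as e:
    print(e)  # message mentions transformers, torch and note_seq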
'''simple docstring'''

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letter characters pass through unchanged.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
13
1
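A worked example of the cipher above, using the classic LEMON key: the key repeats over the message, each letter shifts by the corresponding key letter, and decryption applies the inverse shifts.

assert encrypt_message("LEMON", "ATTACKATDAWN") == "LXFOPVEFRNHR"
assert decrypt_message("LEMON", "LXFOPVEFRNHR") == "ATTACKATDAWN"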
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
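A small sketch of what the lazy structure above enables, assuming transformers is installed: importing the package stays cheap, and submodules are loaded only on first attribute access.

from transformers import TrOCRConfig  # resolved on first access via _LazyModule

config = TrOCRConfig()  # config classes need no torch
# TrOCRForCausalLM is only importable when torch is installed, per the guard above.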
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : Tuple = value elif weight_type == "bias": __snake_case : str = value else: __snake_case : List[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [] __snake_case : List[Any] = fairseq_model.state_dict() __snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Any = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __snake_case : Dict = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2] __snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase ) if "weight_g" in name: __snake_case : Dict = """weight_g""" elif "weight_v" in name: __snake_case : List[str] = """weight_v""" elif "weight" in name: __snake_case : str = """weight""" elif "bias" in name: __snake_case : int = """bias""" else: __snake_case : int = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = full_name.split("""conv_layers.""" )[-1] __snake_case : Optional[int] = name.split(""".""" ) __snake_case : Dict = int(items[0] ) __snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : List[str] = SEWConfig() if is_finetuned: __snake_case : List[Any] = model.wav_encoder.wav_model.cfg else: __snake_case : Optional[Any] = model.cfg __snake_case : Tuple = fs_config.conv_bias __snake_case : List[Any] = eval(fs_config.conv_feature_layers ) __snake_case : List[Any] = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : Tuple = [x[2] for x in conv_layers] __snake_case : List[str] = """gelu""" __snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" __snake_case : Optional[int] = 0.0 __snake_case : Optional[Any] = fs_config.activation_fn.name __snake_case : Dict = fs_config.encoder_embed_dim __snake_case : Dict = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Tuple = 1E-5 __snake_case : Dict = fs_config.encoder_layerdrop __snake_case : Any = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Tuple = fs_config.conv_pos __snake_case : Optional[int] = len(_lowerCamelCase ) __snake_case : int = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : Union[str, Any] = model.cfg __snake_case : Tuple = fs_config.final_dropout __snake_case : Tuple = fs_config.layerdrop __snake_case : Any = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : List[Any] = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : str = fs_config.mask_channel_length __snake_case : Any = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : str = fs_config.mask_prob __snake_case : str = """Wav2Vec2FeatureExtractor""" __snake_case : Dict = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase ) else: __snake_case : int = convert_config(model[0] , _lowerCamelCase ) __snake_case : Dict = model[0].eval() __snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __snake_case : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , 
do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if is_finetuned: if dict_path: __snake_case : str = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Optional[Any] = target_dict.bos_index __snake_case : Tuple = target_dict.pad_index __snake_case : List[str] = target_dict.bos_index __snake_case : Optional[Any] = target_dict.eos_index __snake_case : List[str] = len(target_dict.symbols ) __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" ) if not os.path.isdir(_lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case : List[str] = SEWForCTC(_lowerCamelCase ) else: __snake_case : List[str] = SEWModel(_lowerCamelCase ) feature_extractor.save_pretrained(_lowerCamelCase ) recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __UpperCamelCase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) -> Any: """simple docstring""" model.train() __snake_case : List[Any] = model(_lowerCamelCase ) __snake_case : Any = F.mse_loss(_lowerCamelCase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]: """simple docstring""" set_seed(42 ) __snake_case : Any = RegressionModel() __snake_case : str = deepcopy(_lowerCamelCase ) __snake_case : List[Any] = RegressionDataset(length=80 ) __snake_case : Optional[int] = DataLoader(_lowerCamelCase , batch_size=16 ) model.to(accelerator.device ) if sched: __snake_case : Optional[Any] = AdamW(params=model.parameters() , lr=1E-3 ) __snake_case : Tuple = AdamW(params=ddp_model.parameters() , lr=1E-3 ) __snake_case : Optional[Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase : epoch**0.65 ) __snake_case : Tuple = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase : epoch**0.65 ) # Make a copy of `model` if sched: __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: __snake_case , __snake_case : List[Any] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case , __snake_case : List[str] = get_training_setup(_lowerCamelCase ) # Use a single batch __snake_case , __snake_case : Tuple = next(iter(_lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __snake_case , __snake_case : Optional[Any] = accelerator.gather((ddp_input, ddp_target) ) __snake_case , __snake_case : str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase ): step_model(_lowerCamelCase , 
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __snake_case : str = ddp_input[torch.randperm(len(_lowerCamelCase ) )] def _a ( _lowerCamelCase ) -> int: """simple docstring""" __snake_case , __snake_case , __snake_case : Dict = get_training_setup(_lowerCamelCase ) # Use a single batch __snake_case , __snake_case : List[Any] = next(iter(_lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __snake_case , __snake_case : Tuple = accelerator.gather((ddp_input, ddp_target) ) __snake_case , __snake_case : str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCamelCase ): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: # Sync grads step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __snake_case : Optional[Any] = ddp_input[torch.randperm(len(_lowerCamelCase ) )] def _a ( _lowerCamelCase=False , _lowerCamelCase=False ) -> Tuple: """simple docstring""" __snake_case : int = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __snake_case , __snake_case , __snake_case : Tuple = get_training_setup(_lowerCamelCase ) for iteration, batch in enumerate(_lowerCamelCase ): __snake_case , __snake_case : Union[str, Any] = batch.values() # Gather the distributed inputs and targs for the base model __snake_case , __snake_case : Tuple = accelerator.gather((ddp_input, ddp_target) ) __snake_case , __snake_case : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_lowerCamelCase ): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # DDP 
model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __snake_case : List[str] = ddp_input[torch.randperm(len(_lowerCamelCase ) )] GradientState._reset_state() def _a ( _lowerCamelCase=False , _lowerCamelCase=False ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = Accelerator( split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = get_training_setup(_lowerCamelCase , _lowerCamelCase ) for iteration, batch in enumerate(_lowerCamelCase ): __snake_case , __snake_case : List[str] = batch.values() # Gather the distributed inputs and targs for the base model __snake_case , __snake_case : Dict = accelerator.gather((ddp_input, ddp_target) ) __snake_case , __snake_case : Dict = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_lowerCamelCase ): step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' __snake_case : Optional[int] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase )) if accelerator.num_processes > 1: check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = Accelerator() __snake_case : Optional[int] = RegressionDataset(length=80 ) __snake_case : Any = DataLoader(_lowerCamelCase , batch_size=16 ) __snake_case : Union[str, Any] = RegressionDataset(length=96 ) __snake_case : Any = DataLoader(_lowerCamelCase , batch_size=16 ) __snake_case , __snake_case : List[str] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_lowerCamelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase ) if 
iteration < len(_lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_lowerCamelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCamelCase ) if batch_num < len(_lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _a ( ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = Accelerator() __snake_case : Union[str, Any] = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(_lowerCamelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(_lowerCamelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase ) -> Any: """simple docstring""" main() if __name__ == "__main__": main()
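# A minimal sketch of the `accumulate` pattern the tests above exercise,
# assuming the standard Accelerate API; `model`, `optimizer`, `dataloader`,
# and `loss_fn` are placeholders supplied by the caller.
def accumulate_sketch(model, optimizer, dataloader, loss_fn):
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        with accelerator.accumulate(model):
            loss = loss_fn(model(inputs), targets)
            accelerator.backward(loss)  # syncs gradients only on accumulation boundaries
            optimizer.step()
            optimizer.zero_grad()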
'''simple docstring''' def _a ( _lowerCamelCase ) -> bool: """simple docstring""" __snake_case : Optional[int] = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _a ( _lowerCamelCase = 5000 ) -> int: """simple docstring""" __snake_case : int = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )] for i, pentagonal_i in enumerate(_lowerCamelCase ): for j in range(_lowerCamelCase , len(_lowerCamelCase ) ): __snake_case : Optional[int] = pentagonal_nums[j] __snake_case : str = pentagonal_i + pentagonal_j __snake_case : List[Any] = pentagonal_j - pentagonal_i if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ): return b return -1 if __name__ == "__main__": print(f"""{solution() = }""")
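# In the original source the first helper above is named `is_pentagonal`, as
# the call inside the solver shows; with that name restored, a quick sanity
# check (the first pentagonal numbers are 1, 5, 12, 22, 35):
#
#     assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
#     assert not is_pentagonal(2)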
'''simple docstring''' __UpperCamelCase = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] __UpperCamelCase = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] __UpperCamelCase = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] __UpperCamelCase = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] __UpperCamelCase = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] __UpperCamelCase = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] __UpperCamelCase = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] __UpperCamelCase = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : List[Any] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __snake_case : int = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __snake_case : Optional[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above __snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __snake_case : Dict = output[output != -float("""inf""" )] __snake_case : Optional[Any] = tf.cast( tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @require_tf class _A ( unittest.TestCase , __lowercase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): lowercase__: Tuple = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" __snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Optional[int] = 2 __snake_case : str = 2 class _A ( tf.Module ): def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Dict = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict: """simple docstring""" __snake_case : Tuple = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : int = [[2, 0], [1_02, 1_03]] __snake_case : Tuple = [[1, 0], [1, 1]] __snake_case : Union[str, Any] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for batch_size in range(1 , len(__magic_name__ ) + 1 ): __snake_case : Union[str, Any] = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } __snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""] __snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case 
: Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Dict = 1 __snake_case : int = 2 class _A ( tf.Module ): def __init__( self : Tuple , __magic_name__ : List[str] ) -> int: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : Union[str, Any] = [[2], [1_02, 1_03]] __snake_case : Tuple = [[1], [1, 1]] __snake_case : List[str] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for input_row in range(len(__magic_name__ ) ): __snake_case : Tuple = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } __snake_case : str = serving_func(**__magic_name__ )["""sequences"""] __snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow @require_tensorflow_text def lowercase__ ( self : Dict ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ ) class _A ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ) -> int: """simple docstring""" super().__init__() __snake_case : Any = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() ) __snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ ) __snake_case , __snake_case : List[Any] = text.pad_model_inputs( __magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ ) return self.tokenizer.detokenize(__magic_name__ ) __snake_case : int = CompleteSentenceTransformer() __snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" ) __snake_case : Tuple = complete_model(__magic_name__ ) __snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ ) keras_model.save(__magic_name__ ) def lowercase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __snake_case : Dict = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } __snake_case : str = 14 __snake_case : str = 
AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : int = """Hello, my dog is cute and""" __snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" ) __snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : List[Any] = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) __snake_case : Dict = [6_38, 1_98] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : str = """Hugging Face is a technology company based in New York and Paris.""" __snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids __snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : int = bart_model.generate(__magic_name__ ).numpy() class _A ( __lowercase ): def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) ) class _A ( bart_model.model.encoder.__class__ ): def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) __snake_case : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __snake_case : Dict = bart_model.generate(__magic_name__ ).numpy() with self.assertRaises(__magic_name__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(__magic_name__ , foo="""bar""" )
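# A toy illustration of the filtering helper exercised in the first test
# above: with `top_k=2` only the two largest logits survive and the rest are
# replaced by -inf (the example tensor is made up).
def _top_k_filtering_demo():
    logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
    return tf_top_k_top_p_filtering(logits, top_k=2, top_p=1.0)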
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] __UpperCamelCase = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def _a ( _lowerCamelCase ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" ) return sd def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=rename_keys_prefix ) -> str: """simple docstring""" __snake_case : Optional[Any] = OrderedDict() __snake_case : Optional[Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __snake_case : Optional[int] = key for name_pair in rename_keys_prefix: __snake_case : str = new_key.replace(name_pair[0] , name_pair[1] ) __snake_case : List[str] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __snake_case : List[Any] = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" assert ( checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: __snake_case : Tuple = """pretraining""" if "vcr" in checkpoint_path: __snake_case : Tuple = {"""visual_embedding_dim""": 512} elif "vqa_advanced" in checkpoint_path: __snake_case : int = {"""visual_embedding_dim""": 2048} elif "vqa" in checkpoint_path: __snake_case : Optional[int] = {"""visual_embedding_dim""": 2048} elif "nlvr" in checkpoint_path: __snake_case : str = {"""visual_embedding_dim""": 1024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: __snake_case : str = {"""visual_embedding_dim""": 512} __snake_case : Dict = """multichoice""" elif "vqa_advanced" in checkpoint_path: __snake_case : int = {"""visual_embedding_dim""": 2048} __snake_case : Dict = """vqa_advanced""" elif "vqa" in checkpoint_path: __snake_case : Dict = {"""visual_embedding_dim""": 2048, """num_labels""": 3129} __snake_case : List[Any] = """vqa""" elif "nlvr" in checkpoint_path: __snake_case : List[Any] = { """visual_embedding_dim""": 1024, """num_labels""": 2, } __snake_case : int = """nlvr""" __snake_case : Optional[Any] = VisualBertConfig(**_lowerCamelCase ) # Load State Dict __snake_case : List[Any] = load_state_dict(_lowerCamelCase ) __snake_case : Tuple = get_new_dict(_lowerCamelCase , _lowerCamelCase ) if model_type == "pretraining": __snake_case : Tuple = VisualBertForPreTraining(_lowerCamelCase ) elif model_type == "vqa": __snake_case : List[str] = 
VisualBertForQuestionAnswering(_lowerCamelCase ) elif model_type == "nlvr": __snake_case : Tuple = VisualBertForVisualReasoning(_lowerCamelCase ) elif model_type == "multichoice": __snake_case : Optional[Any] = VisualBertForMultipleChoice(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) # Save Checkpoints Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") __UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
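# A minimal sketch of a programmatic call to the converter above, assuming the
# entry-point name from the `__main__` block. The checkpoint file name must be
# one of ACCEPTABLE_CHECKPOINTS; the output folder is a placeholder.
def _convert_nlvr2_sketch():
    convert_visual_bert_checkpoint("nlvr2_fine_tuned.th", "./visualbert-nlvr2")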
'''simple docstring''' from __future__ import annotations def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> None: """simple docstring""" __snake_case : int = len(_lowerCamelCase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_lowerCamelCase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , ) def _a ( _lowerCamelCase ) -> None: """simple docstring""" __snake_case : list[list[str]] = [] depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase ) # Print all the boards for board in boards: for column in board: print(_lowerCamelCase ) print("""""" ) print(len(_lowerCamelCase ) , """solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
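# In the original source the search function above is named
# `depth_first_search`, as its recursive call and `n_queens_solution` show;
# with that name restored it reproduces the known solution counts for small
# boards (2, 10 and 4 solutions for n = 4, 5, 6):
#
#     for n, expected in ((4, 2), (5, 10), (6, 4)):
#         boards: list[list[str]] = []
#         depth_first_search([], [], [], boards, n)
#         assert len(boards) == expected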
'''simple docstring''' def _a ( ) -> int: """simple docstring""" __snake_case : Optional[Any] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] __snake_case : Union[str, Any] = 6 __snake_case : int = 1 __snake_case : List[str] = 1901 __snake_case : Union[str, Any] = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 __snake_case : List[Any] = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 __snake_case : Dict = day - 29 else: if day > days_per_month[month - 1]: month += 1 __snake_case : Dict = day - days_per_month[month - 2] if month > 12: year += 1 __snake_case : Union[str, Any] = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
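# An independent cross-check of the calendar walk above, using only the
# standard library: count first-of-month Sundays over 1901-2000. Both counts
# should come out to 171, the well-known Project Euler 19 answer.
def _count_sundays_with_datetime() -> int:
    from datetime import date

    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if date(year, month, 1).weekday() == 6  # weekday() counts Monday as 0
    )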
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCamelCase = logging.getLogger(__name__) class _A ( __lowercase ): def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" super().__init__( __magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , ) __snake_case : List[str] = None def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]: """simple docstring""" logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __snake_case : List[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __snake_case : List[str] = str(distributed_port + 1 ) __snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowercase__ ( self : int ) -> int: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ ) dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group ) return target_tensor def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : int = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ ) return ifname def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): __snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ ) # distributed training __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group ) # gather logic __snake_case : Tuple = None if self._is_main(): __snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )] dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group ) # scatter logic __snake_case : Optional[int] = question_hidden_states.shape[0] __snake_case : Optional[Any] = [] __snake_case : Any = [] if self._is_main(): assert len(__magic_name__ ) == world_size __snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ ) __snake_case , __snake_case : Tuple = 
torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa ) __snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
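# A bare sketch of the gather-then-scatter pattern the retriever above is
# built on, assuming an already-initialized process group; the expensive
# retrieval on rank 0 is elided, and shapes and the group are placeholders.
def gather_then_scatter(local_tensor, group):
    rank = dist.get_rank(group=group)
    world_size = dist.get_world_size(group=group)
    gather_list = [torch.empty_like(local_tensor) for _ in range(world_size)] if rank == 0 else None
    dist.gather(local_tensor, dst=0, gather_list=gather_list, group=group)
    # ... rank 0 would run the retrieval over the gathered tensors here ...
    result = torch.empty_like(local_tensor)
    dist.scatter(result, src=0, scatter_list=gather_list, group=group)
    return result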
'''simple docstring''' # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler") class _A : def __init__( self : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : bool = True , __magic_name__ : bool = False ) -> Optional[int]: """simple docstring""" __snake_case : Dict = scheduler __snake_case : Any = optimizers if isinstance(__magic_name__ , (list, tuple) ) else [optimizers] __snake_case : Any = split_batches __snake_case : Dict = step_with_optimizer __snake_case : str = GradientState() def lowercase__ ( self : Optional[Any] , *__magic_name__ : Optional[Any] , **__magic_name__ : Optional[Any] ) -> Tuple: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__magic_name__ , **__magic_name__ ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__magic_name__ , **__magic_name__ ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step __snake_case : str = AcceleratorState().num_processes for _ in range(__magic_name__ ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , """total_steps""" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__magic_name__ , **__magic_name__ ) else: self.scheduler.step(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" return self.scheduler.get_last_lr() def lowercase__ ( self : Any ) -> Optional[Any]: """simple docstring""" return self.scheduler.state_dict() def lowercase__ ( self : Optional[int] , __magic_name__ : str ) -> int: """simple docstring""" self.scheduler.load_state_dict(__magic_name__ ) def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" return self.scheduler.get_lr() def lowercase__ ( self : Optional[int] , *__magic_name__ : Union[str, Any] , **__magic_name__ : int ) -> str: """simple docstring""" return self.scheduler.print_lr(*__magic_name__ , **__magic_name__ )
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __UpperCamelCase = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$") @total_ordering @dataclass class _A : lowercase__: str lowercase__: Optional[str] = None lowercase__: Optional[Union[str, int]] = None lowercase__: Optional[Union[str, int]] = None lowercase__: Optional[Union[str, int]] = None def lowercase__ ( self : str ) -> List[str]: """simple docstring""" __snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str ) def __repr__( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" return self.major, self.minor, self.patch def lowercase__ ( self : Any , __magic_name__ : Dict ) -> Optional[int]: """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): return Version(__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): return other raise TypeError(f'''{other} (type {type(__magic_name__ )}) cannot be compared to version.''' ) def __eq__( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> List[Any]: """simple docstring""" try: __snake_case : Union[str, Any] = self._validate_operand(__magic_name__ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = self._validate_operand(__magic_name__ ) return self.tuple < other.tuple def __hash__( self : Any ) -> Any: """simple docstring""" return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowercase__ ( cls : List[str] , __magic_name__ : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowercase__ ( self : str ) -> str: """simple docstring""" return self.version_str def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : List[Any] = _VERSION_REG.match(_lowerCamelCase ) if not res: raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(_lowerCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" return ".".join(str(_lowerCamelCase ) for v in version_tuple )
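# In the original source the two module-level helpers above are named
# `_str_to_version_tuple` and `_version_tuple_to_str`, as the class body
# shows; with those names restored they round-trip:
#
#     assert _str_to_version_tuple("1.0.2") == (1, 0, 2)
#     assert _version_tuple_to_str((1, 0, 2)) == "1.0.2"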
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"} class _A ( __lowercase ): lowercase__: int = '''ctrl''' lowercase__: Optional[Any] = ['''past_key_values'''] lowercase__: Any = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : str , __magic_name__ : Optional[int]=24_65_34 , __magic_name__ : List[Any]=2_56 , __magic_name__ : List[Any]=12_80 , __magic_name__ : str=81_92 , __magic_name__ : Optional[int]=48 , __magic_name__ : int=16 , __magic_name__ : Tuple=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Union[str, Any]=1E-6 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : Any=True , **__magic_name__ : Optional[int] , ) -> Tuple: """simple docstring""" __snake_case : Any = vocab_size __snake_case : Any = n_positions __snake_case : Optional[int] = n_embd __snake_case : List[str] = n_layer __snake_case : int = n_head __snake_case : Dict = dff __snake_case : Optional[int] = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : List[Any] = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Any = use_cache super().__init__(**__magic_name__ )
'''simple docstring''' def _a ( _lowerCamelCase ) -> str: """simple docstring""" if not all(char in """01""" for char in bin_string ): raise ValueError("""Non-binary value was passed to the function""" ) if not bin_string: raise ValueError("""Empty string was passed to the function""" ) __snake_case : Tuple = """""" while len(_lowerCamelCase ) % 3 != 0: __snake_case : Any = """0""" + bin_string __snake_case : Tuple = [ bin_string[index : index + 3] for index in range(len(_lowerCamelCase ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: __snake_case : Tuple = 0 for index, val in enumerate(_lowerCamelCase ): oct_val += int(2 ** (2 - index) * int(_lowerCamelCase ) ) oct_string += str(_lowerCamelCase ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
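# Two worked conversions through the helper above (left as `_a` in this dump),
# cross-checked against Python's built-in octal formatting:
assert _a("1111") == "17" == oct(0b1111)[2:]      # 0b1111 == 15 == 0o17
assert _a("101010") == "52" == oct(0b101010)[2:]  # 0b101010 == 42 == 0o52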
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["ConditionalDetrFeatureExtractor"] __UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __UpperCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __UpperCamelCase = TaTokenizerFast __UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __UpperCamelCase = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
'''simple docstring''' import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = FlaxAutoencoderKL @property def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = 4 __snake_case : int = 3 __snake_case : Any = (32, 32) __snake_case : Tuple = jax.random.PRNGKey(0 ) __snake_case : Dict = jax.random.uniform(__magic_name__ , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def lowercase__ ( self : List[Any] ) -> int: """simple docstring""" __snake_case : List[str] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } __snake_case : Dict = self.dummy_input return init_dict, inputs_dict
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __snake_case : Tuple = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""] __snake_case : Any = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. __snake_case : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring''' import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = BertJapaneseTokenizer lowercase__: Any = False lowercase__: List[Any] = True def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" super().setUp() __snake_case : int = [ """[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは""", """世界""", """##世界""", """、""", """##、""", """。""", """##。""", ] __snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = """こんにちは、世界。 \nこんばんは、世界。""" __snake_case : str = """こんにちは 、 世界 。 こんばんは 、 世界 。""" return input_text, output_text def lowercase__ ( self : Tuple , __magic_name__ : str ) -> Optional[Any]: """simple docstring""" __snake_case , __snake_case : List[str] = self.get_input_output_texts(__magic_name__ ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Optional[int] = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return text, ids def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" pass # TODO add if relevant def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" pass # TODO add if relevant def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass # TODO add if relevant def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class(self.vocab_file ) __snake_case : str = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" ) self.assertListEqual(__magic_name__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" ) self.assertIsNotNone(__magic_name__ ) __snake_case : int = """こんにちは、世界。\nこんばんは、世界。""" __snake_case : List[str] = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __snake_case : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(__magic_name__ , """wb""" ) as handle: pickle.dump(__magic_name__ , __magic_name__ ) with open(__magic_name__ , """rb""" ) as handle: __snake_case : Tuple = pickle.load(__magic_name__ ) __snake_case : Dict = 
tokenizer_new.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : Any ) -> Dict: """simple docstring""" __snake_case : Tuple = MecabTokenizer(mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" try: __snake_case : List[str] = MecabTokenizer(mecab_dic="""unidic_lite""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" try: __snake_case : Optional[Any] = MecabTokenizer(mecab_dic="""unidic""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : str = MecabTokenizer(do_lower_case=__magic_name__ , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" try: __snake_case : Optional[Any] = MecabTokenizer( do_lower_case=__magic_name__ , normalize_text=__magic_name__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. 
return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) def lowercase__ ( self : Optional[Any] ) -> Any: """simple docstring""" __snake_case : List[Any] = MecabTokenizer(normalize_text=__magic_name__ , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , ) @require_sudachi def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" ) self.assertIsNotNone(__magic_name__ ) __snake_case : Any = """こんにちは、世界。\nこんばんは、世界。""" __snake_case : List[Any] = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __snake_case : Tuple = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(__magic_name__ , """wb""" ) as handle: pickle.dump(__magic_name__ , __magic_name__ ) with open(__magic_name__ , """rb""" ) as handle: __snake_case : int = pickle.load(__magic_name__ ) __snake_case : str = tokenizer_new.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) @require_sudachi def lowercase__ ( self : str ) -> Any: """simple docstring""" __snake_case : List[Any] = SudachiTokenizer(sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def lowercase__ ( self : Optional[Any] ) -> Any: """simple docstring""" __snake_case : str = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] ) @require_sudachi def lowercase__ ( self : int ) -> Optional[int]: """simple docstring""" __snake_case : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] ) @require_sudachi def lowercase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] ) @require_sudachi def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" __snake_case : int = SudachiTokenizer(do_lower_case=__magic_name__ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Optional[Any] = SudachiTokenizer(normalize_text=__magic_name__ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) 
, [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , ) @require_sudachi def lowercase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = SudachiTokenizer(trim_whitespace=__magic_name__ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) @require_jumanpp def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" ) self.assertIsNotNone(__magic_name__ ) __snake_case : int = """こんにちは、世界。\nこんばんは、世界。""" __snake_case : int = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __snake_case : Tuple = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(__magic_name__ , """wb""" ) as handle: pickle.dump(__magic_name__ , __magic_name__ ) with open(__magic_name__ , """rb""" ) as handle: __snake_case : Tuple = pickle.load(__magic_name__ ) __snake_case : Any = tokenizer_new.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) @require_jumanpp def lowercase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : Any = JumanppTokenizer(do_lower_case=__magic_name__ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def lowercase__ ( self : List[str] ) -> str: """simple docstring""" __snake_case : Tuple = JumanppTokenizer(normalize_text=__magic_name__ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __snake_case : List[Any] = JumanppTokenizer(trim_whitespace=__magic_name__ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , ) @require_jumanpp def lowercase__ ( self : int ) -> Tuple: """simple docstring""" __snake_case : Optional[int] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , ) def lowercase__ ( self : 
Optional[int] ) -> int: """simple docstring""" __snake_case : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""] __snake_case : str = {} for i, token in enumerate(__magic_name__ ): __snake_case : Any = i __snake_case : Any = WordpieceTokenizer(vocab=__magic_name__ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] ) self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] ) self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" ) __snake_case : str = tokenizer.subword_tokenizer __snake_case : List[Any] = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" ) self.assertListEqual(__magic_name__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] ) __snake_case : Tuple = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" ) self.assertListEqual(__magic_name__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Dict = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" ) __snake_case : Tuple = tokenizer.encode("""ありがとう。""" , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__magic_name__ ) __snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class _A ( __lowercase , unittest.TestCase ): lowercase__: List[str] = BertJapaneseTokenizer lowercase__: Union[str, Any] = False def lowercase__ ( self : Any ) -> int: """simple docstring""" super().setUp() __snake_case : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""] __snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> str: """simple docstring""" return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **__magic_name__ ) def lowercase__ ( self : int , __magic_name__ : Tuple ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = """こんにちは、世界。 \nこんばんは、世界。""" __snake_case : List[str] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。""" return input_text, output_text def lowercase__ ( self : int ) -> str: """simple docstring""" pass # TODO add if relevant def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" pass # TODO add if relevant def lowercase__ ( self : int ) -> Optional[Any]: """simple docstring""" pass # TODO add if relevant def lowercase__ ( self : Any ) -> Optional[Any]: """simple docstring""" 
__snake_case : List[Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" ) __snake_case : Tuple = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" ) self.assertListEqual( __magic_name__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def lowercase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""] __snake_case : Optional[Any] = {} for i, token in enumerate(__magic_name__ ): __snake_case : int = i __snake_case : str = CharacterTokenizer(vocab=__magic_name__ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] ) self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] ) def lowercase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" ) __snake_case : List[str] = tokenizer.encode("""ありがとう。""" , add_special_tokens=__magic_name__ ) __snake_case : Tuple = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__magic_name__ ) __snake_case : Any = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Tuple = """cl-tohoku/bert-base-japanese""" __snake_case : Optional[Any] = AutoTokenizer.from_pretrained(__magic_name__ ) self.assertIsInstance(__magic_name__ , __magic_name__ ) class _A ( unittest.TestCase ): def lowercase__ ( self : int ) -> Any: """simple docstring""" __snake_case : str = """cl-tohoku/bert-base-japanese""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertTokenizer.from_pretrained(__magic_name__ ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""" ) ) __snake_case : Optional[Any] = """bert-base-cased""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertJapaneseTokenizer.from_pretrained(__magic_name__ ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""" ) )
13
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
13
1
'''simple docstring''' from __future__ import annotations def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> None: """simple docstring""" if start is None: __snake_case : Optional[Any] = 0 if end is None: __snake_case : int = len(_lowerCamelCase ) - 1 if start >= end: return __snake_case : str = (start + end) // 2 slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase ) if sequence[end] < sequence[mid]: __snake_case , __snake_case : List[str] = sequence[mid], sequence[end] slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
13
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
13
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCamelCase = logging.get_logger(__name__) class _A ( __lowercase ): def __init__( self : Tuple , *__magic_name__ : str , **__magic_name__ : Optional[Any] ) -> None: """simple docstring""" warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __magic_name__ , ) super().__init__(*__magic_name__ , **__magic_name__ )
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["ConditionalDetrFeatureExtractor"] __UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
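# A minimal sketch (not part of the test suite above) checking the token-count
# arithmetic used by TimesformerModelTester: each frame contributes
# (image_size // patch_size) ** 2 patch tokens, plus one shared CLS token.
# The numbers are the tester's defaults (image_size=10, patch_size=2, num_frames=2).
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 25 patches per frame
seq_length = num_frames * num_patches_per_frame + 1  # 51 tokens including CLS
assert (num_patches_per_frame, seq_length) == (25, 51)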
'''simple docstring''' def _a ( _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : str = 0 __snake_case : Optional[int] = len(_lowerCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , _lowerCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _a ( _lowerCamelCase ) -> Tuple: """simple docstring""" if len(_lowerCamelCase ) <= 1: return arr, 0 __snake_case : Any = len(_lowerCamelCase ) // 2 __snake_case : List[str] = arr[0:mid] __snake_case : int = arr[mid:] __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : Tuple = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : str = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase ) __snake_case : str = inversion_p + inversions_q + cross_inversions return c, num_inversions def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Any = [] __snake_case : List[str] = 0 while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(_lowerCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(_lowerCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _a ( ) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __snake_case : Optional[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , _lowerCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __snake_case : Any = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) # an empty list should also have zero inversions __snake_case : List[Any] = [] __snake_case : List[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) if __name__ == "__main__": main()
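# A de-obfuscated sketch of the divide-and-conquer counter above; names are
# illustrative (the originals are all defined as `_a`, shadowing one another),
# but the merge-step logic is the same: when left[i] > right[j], every element
# still waiting in the left half forms an inversion with right[j].
def count_inversions(arr):
    if len(arr) <= 1:
        return 0
    mid = len(arr) // 2
    left, right = arr[:mid], arr[mid:]
    count = count_inversions(left) + count_inversions(right)
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            count += len(left) - i  # all remaining left items invert right[j]
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    arr[:] = merged + left[i:] + right[j:]  # sort in place for the parent call
    return count


assert count_inversions([10, 2, 1, 5, 5, 2, 11]) == 8  # matches the comment above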
'''simple docstring''' from __future__ import annotations class _A : def __init__( self : str , __magic_name__ : Optional[Any]=None ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = data __snake_case : int = None def __repr__( self : Any ) -> List[Any]: """simple docstring""" __snake_case : List[Any] = [] __snake_case : List[str] = self while temp: string_rep.append(f'''{temp.data}''' ) __snake_case : int = temp.next return "->".join(__magic_name__ ) def _a ( _lowerCamelCase ) -> str: """simple docstring""" if not elements_list: raise Exception("""The Elements List is empty""" ) __snake_case : List[Any] = Node(elements_list[0] ) for i in range(1 , len(_lowerCamelCase ) ): __snake_case : List[Any] = Node(elements_list[i] ) __snake_case : Union[str, Any] = current.next return head def _a ( _lowerCamelCase ) -> None: """simple docstring""" if head_node is not None and isinstance(_lowerCamelCase , _lowerCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def _a ( ) -> Any: """simple docstring""" from doctest import testmod testmod() __snake_case : Dict = make_linked_list([14, 52, 14, 12, 43] ) print("""Linked List:""" ) print(_lowerCamelCase ) print("""Elements in Reverse:""" ) print_reverse(_lowerCamelCase ) if __name__ == "__main__": main()
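# A minimal iterative alternative to the recursive print_reverse above; it
# avoids Python's recursion limit on long lists. Node is assumed to expose
# .data and .next exactly as in the class above.
def print_reverse_iterative(head) -> None:
    items = []
    node = head
    while node is not None:
        items.append(node.data)
        node = node.next
    for data in reversed(items):
        print(data)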
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _A ( unittest.TestCase ): def lowercase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __snake_case : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ ) __snake_case : str = -1 __snake_case : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ ) __snake_case : int = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __snake_case : Union[str, Any] = TextStreamer(__magic_name__ ) model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ , streamer=__magic_name__ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __snake_case : Dict = cs.out[:-1] self.assertEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" __snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ ) __snake_case : Any = -1 __snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ ) __snake_case : Optional[Any] = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ ) __snake_case : Dict = tokenizer.decode(greedy_ids[0] ) __snake_case : List[Any] = TextIteratorStreamer(__magic_name__ ) __snake_case : Dict = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} __snake_case : List[str] = Thread(target=model.generate , kwargs=__magic_name__ ) thread.start() __snake_case : Dict = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Dict: """simple docstring""" __snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ ) __snake_case : int = -1 __snake_case : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ ) __snake_case : Optional[int] = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ ) __snake_case : Any = greedy_ids[:, input_ids.shape[1] :] __snake_case : Any = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __snake_case : str = TextStreamer(__magic_name__ , skip_prompt=__magic_name__ ) model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ , streamer=__magic_name__ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __snake_case : str = cs.out[:-1] self.assertEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : List[str] = 
AutoTokenizer.from_pretrained("""distilgpt2""" ) __snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__magic_name__ ) __snake_case : Any = -1 __snake_case : Union[str, Any] = torch.ones((1, 5) , device=__magic_name__ ).long() * model.config.bos_token_id with CaptureStdout() as cs: __snake_case : Any = TextStreamer(__magic_name__ , skip_special_tokens=__magic_name__ ) model.generate(__magic_name__ , max_new_tokens=1 , do_sample=__magic_name__ , streamer=__magic_name__ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __snake_case : List[Any] = cs.out[:-1] # Remove the final "\n" __snake_case : str = tokenizer(__magic_name__ , return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def lowercase__ ( self : str ) -> Optional[Any]: """simple docstring""" __snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ ) __snake_case : Union[str, Any] = -1 __snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ ) __snake_case : str = TextIteratorStreamer(__magic_name__ , timeout=0.001 ) __snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} __snake_case : int = Thread(target=model.generate , kwargs=__magic_name__ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__magic_name__ ): __snake_case : Tuple = """""" for new_text in streamer: streamer_text += new_text
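# A minimal sketch of the producer/consumer pattern the tests above exercise:
# generate() runs in a background thread while the streamer is drained on the
# main thread. The tiny checkpoint name is the one used in the tests; loading
# it requires network access.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=10, do_sample=False)
Thread(target=model.generate, kwargs=generation_kwargs).start()
text = "".join(chunk for chunk in streamer)  # iteration blocks until generation ends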
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
'''simple docstring''' def _a ( _lowerCamelCase ) -> list[list[int]]: """simple docstring""" __snake_case : List[Any] = [] if len(_lowerCamelCase ) == 1: return [nums.copy()] for _ in range(len(_lowerCamelCase ) ): __snake_case : int = nums.pop(0 ) __snake_case : Optional[int] = permute(_lowerCamelCase ) for perm in permutations: perm.append(_lowerCamelCase ) result.extend(_lowerCamelCase ) nums.append(_lowerCamelCase ) return result def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" def backtrack(_lowerCamelCase ): if start == len(_lowerCamelCase ) - 1: output.append(nums[:] ) else: for i in range(_lowerCamelCase , len(_lowerCamelCase ) ): __snake_case , __snake_case : Tuple = nums[i], nums[start] backtrack(start + 1 ) __snake_case , __snake_case : Any = nums[i], nums[start] # backtrack __snake_case : str = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function __UpperCamelCase = permutea([1, 2, 3]) print(res) doctest.testmod()
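# A cross-check of the backtracking routine above against the standard library,
# re-implemented with readable names (the originals are both defined as `_a`,
# so the second definition shadows the first).
from itertools import permutations


def permute(nums):
    output = []

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
            return
        for i in range(start, len(nums)):
            nums[start], nums[i] = nums[i], nums[start]
            backtrack(start + 1)
            nums[start], nums[i] = nums[i], nums[start]  # undo the swap

    backtrack(0)
    return output


assert sorted(map(tuple, permute([1, 2, 3]))) == sorted(permutations([1, 2, 3]))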
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers __UpperCamelCase = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
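# A sketch extending the optional-dependency probe pattern above to one more
# library; "accelerate" is an illustrative choice, not part of the original script.
try:
    import accelerate

    print("Accelerate version:", accelerate.__version__)
except ImportError:
    print("Accelerate version:", None)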
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class _A ( __lowercase ): lowercase__: str = '''codegen''' lowercase__: Optional[int] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int: """simple docstring""" __snake_case : List[str] = vocab_size __snake_case : Union[str, Any] = n_ctx __snake_case : int = n_positions __snake_case : str = n_embd __snake_case : Dict = n_layer __snake_case : List[Any] = n_head __snake_case : Any = n_inner __snake_case : str = rotary_dim __snake_case : List[str] = activation_function __snake_case : Tuple = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Tuple = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : Union[str, Any] = eos_token_id super().__init__( bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ ) class _A ( __lowercase ): def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , 
__magic_name__ : bool = False , ) -> Tuple: """simple docstring""" super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ ) if not getattr(self._config , """pad_token_id""" , __magic_name__ ): # TODO: how to do that better? __snake_case : List[str] = 0 @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return self._config.n_layer @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : Optional[int] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype __snake_case : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
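# A minimal sketch of the past_key_values geometry built by
# generate_dummy_inputs above, using the CodeGen defaults (n_head=16,
# n_embd=4096): each of the n_layer caches holds a (key, value) pair of this shape.
batch, seqlen = 2, 7
num_attention_heads, hidden_size = 16, 4096
past_key_values_length = seqlen + 2  # the dummy past is deliberately longer
past_shape = (
    batch,
    num_attention_heads,
    past_key_values_length,
    hidden_size // num_attention_heads,  # per-head dimension: 256
)
assert past_shape == (2, 16, 9, 256)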
'''simple docstring''' import colorsys from PIL import Image # type: ignore def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float: """simple docstring""" __snake_case : List[str] = x __snake_case : Optional[Any] = y for step in range(_lowerCamelCase ): # noqa: B007 __snake_case : Tuple = a * a - b * b + x __snake_case : List[Any] = 2 * a * b + y __snake_case : Dict = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _a ( _lowerCamelCase ) -> tuple: """simple docstring""" if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def _a ( _lowerCamelCase ) -> tuple: """simple docstring""" if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowerCamelCase , 1 , 1 ) ) def _a ( _lowerCamelCase = 800 , _lowerCamelCase = 600 , _lowerCamelCase = -0.6 , _lowerCamelCase = 0 , _lowerCamelCase = 3.2 , _lowerCamelCase = 50 , _lowerCamelCase = True , ) -> Image.Image: """simple docstring""" __snake_case : List[Any] = Image.new("""RGB""" , (image_width, image_height) ) __snake_case : Optional[int] = img.load() # loop through the image-coordinates for image_x in range(_lowerCamelCase ): for image_y in range(_lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates __snake_case : Tuple = figure_width / image_width * image_height __snake_case : List[Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width __snake_case : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height __snake_case : Any = get_distance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: __snake_case : Union[str, Any] = get_color_coded_rgb(_lowerCamelCase ) else: __snake_case : str = get_black_and_white_rgb(_lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure __UpperCamelCase = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
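# A standalone sketch of the escape-time measure computed by get_distance above:
# points inside the Mandelbrot set never escape (distance 1.0), while points far
# outside diverge on the first step (distance 0.0).
def escape_distance(x, y, max_step=50):
    a, b = x, y
    for step in range(max_step):
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)


assert escape_distance(0.0, 0.0) == 1.0  # the origin is inside the set
assert escape_distance(2.0, 2.0) == 0.0  # far outside: escapes immediately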
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyImgaImgPipeline lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] lowercase__: int = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowercase__: List[Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: Any = False @property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return 32 @property def lowercase__ ( self : str ) -> str: """simple docstring""" return 32 @property def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __snake_case : Tuple = MultilingualCLIP(__magic_name__ ) __snake_case : Optional[Any] = text_encoder.eval() return text_encoder @property def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return { "block_out_channels": [32, 64], 
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Tuple = self.dummy_text_encoder __snake_case : Dict = self.dummy_tokenizer __snake_case : Dict = self.dummy_unet __snake_case : int = self.dummy_movq __snake_case : List[Any] = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case : Dict = DDIMScheduler(**__magic_name__ ) __snake_case : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str: """simple docstring""" __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Dict = """cpu""" __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : Any = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' 
expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __snake_case : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __snake_case : List[Any] = """A red cartoon frog, 4k""" __snake_case : str = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __snake_case : Any = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case , __snake_case : Optional[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : List[str] = pipeline( __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) __snake_case : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
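# A minimal sketch of the dummy init_image construction in get_dummy_inputs above:
# random floats are laid out HWC, scaled to bytes, and wrapped in a PIL image.
import numpy as np
from PIL import Image

rng = np.random.default_rng(0)
pixels = rng.random((64, 64, 3))  # HWC floats in [0, 1)
init_image = Image.fromarray((pixels * 255).astype(np.uint8)).convert("RGB").resize((256, 256))
assert init_image.size == (256, 256)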
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" config.addinivalue_line( """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" ) config.addinivalue_line( """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" ) config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" ) config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" ) config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" ) config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" ) def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_lowerCamelCase ) def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main __snake_case : Dict = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" if exitstatus == 5: __snake_case : List[Any] = 0 # Doctest custom flag to ignore output. __UpperCamelCase = doctest.register_optionflag("IGNORE_RESULT") __UpperCamelCase = doctest.OutputChecker class _A ( __lowercase ): def lowercase__ ( self : int , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Any ) -> List[Any]: """simple docstring""" if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , __magic_name__ , __magic_name__ , __magic_name__ ) __UpperCamelCase = CustomOutputChecker __UpperCamelCase = HfDoctestModule __UpperCamelCase = HfDocTestParser
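# A minimal sketch of the custom doctest flag mechanism registered above:
# a checker that unconditionally accepts output for examples carrying the flag.
# The flag name here is illustrative, not the one used by the test suite.
import doctest

LENIENT = doctest.register_optionflag("LENIENT")


class LenientOutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if LENIENT & optionflags:
            return True  # ignore the example's printed output entirely
        return doctest.OutputChecker.check_output(self, want, got, optionflags)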
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __UpperCamelCase = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __UpperCamelCase = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } class _A ( __lowercase ): lowercase__: Any = VOCAB_FILES_NAMES lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask'''] lowercase__: List[str] = BartTokenizer def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]: """simple docstring""" super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , 
errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , ) __snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) ) __snake_case : str = add_prefix_space __snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ ) __snake_case : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __snake_case : Any = """post_processor""" __snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) if tokenizer_component_instance: __snake_case : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __snake_case : Tuple = tuple(state["""sep"""] ) if "cls" in state: __snake_case : int = tuple(state["""cls"""] ) __snake_case : Optional[int] = False if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : Optional[Any] = add_prefix_space __snake_case : List[str] = True if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets: __snake_case : Optional[int] = trim_offsets __snake_case : Any = True if changes_to_apply: __snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) ) __snake_case : List[Any] = component_class(**__magic_name__ ) setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) @property def lowercase__ ( self : List[Any] ) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value __snake_case : Union[str, Any] = value def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding: """simple docstring""" __snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding: """simple docstring""" __snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) 
return tuple(__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Optional[int] = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
13
1
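# A hedged usage sketch for the fast BART tokenizer defined above, assuming
# the public `transformers` API; "facebook/bart-base" is one of the
# checkpoints listed in the snippet's pretrained map.
from transformers import BartTokenizerFast

tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
encoding = tokenizer("Hello world", return_tensors="pt")
print(encoding.input_ids)  # e.g. tensor([[0, 31414, 232, 2]])
print(tokenizer.decode(encoding.input_ids[0]))  # <s>Hello world</s>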
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "facebook/deit-base-distilled-patch16-224": ( "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class _A ( __lowercase ): lowercase__: str = '''deit''' def __init__( self : Any , __magic_name__ : str=7_68 , __magic_name__ : str=12 , __magic_name__ : List[Any]=12 , __magic_name__ : int=30_72 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Dict=0.0 , __magic_name__ : List[str]=0.0 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : Union[str, Any]=1E-12 , __magic_name__ : Tuple=2_24 , __magic_name__ : List[str]=16 , __magic_name__ : int=3 , __magic_name__ : Any=True , __magic_name__ : Dict=16 , **__magic_name__ : Optional[Any] , ) -> Union[str, Any]: """simple docstring""" super().__init__(**__magic_name__ ) __snake_case : str = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : Optional[int] = num_attention_heads __snake_case : List[Any] = intermediate_size __snake_case : Optional[Any] = hidden_act __snake_case : Tuple = hidden_dropout_prob __snake_case : Dict = attention_probs_dropout_prob __snake_case : Optional[int] = initializer_range __snake_case : Dict = layer_norm_eps __snake_case : Optional[int] = image_size __snake_case : str = patch_size __snake_case : List[Any] = num_channels __snake_case : Dict = qkv_bias __snake_case : int = encoder_stride class _A ( __lowercase ): lowercase__: List[Any] = version.parse('''1.11''' ) @property def lowercase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowercase__ ( self : Optional[int] ) -> float: """simple docstring""" return 1E-4
13
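# A minimal sketch of using the DeiT config above, assuming the public
# `transformers` API. The defaults mirror the __init__ signature in the
# snippet (hidden_size=768, 12 layers, 12 heads, 224px images, 16px patches).
from transformers import DeiTConfig

config = DeiTConfig()
print(config.hidden_size, config.num_hidden_layers, config.image_size)  # 768 12 224
print(config.model_type)  # deit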
'''simple docstring''' import os import numpy import onnx def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Optional[int] = a.name __snake_case : Dict = b.name __snake_case : Optional[int] = """""" __snake_case : int = """""" __snake_case : Any = a == b __snake_case : List[Any] = name_a __snake_case : List[str] = name_b return res def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_lowerCamelCase , _lowerCamelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) _graph_replace_input_with(node_proto.attribute[1].g , _lowerCamelCase , _lowerCamelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : Dict = list(model.graph.initializer ) __snake_case : List[Any] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __snake_case : Tuple = inits[i].name __snake_case : Tuple = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : str = os.path.dirname(_lowerCamelCase ) __snake_case : Dict = os.path.basename(_lowerCamelCase ) __snake_case : Union[str, Any] = onnx.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) ) __snake_case : Dict = list(model.graph.initializer ) __snake_case : Optional[int] = set() __snake_case : Optional[Any] = {} __snake_case : Tuple = [] __snake_case : List[Any] = 0 for i in range(len(_lowerCamelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(_lowerCamelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_lowerCamelCase ) dup_set.add(_lowerCamelCase ) __snake_case : List[Any] = inits[j].data_type __snake_case : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , _lowerCamelCase ) total_reduced_size += mem_size __snake_case : Any = inits[i].name __snake_case : Any = inits[j].name if name_i in dup_map: dup_map[name_i].append(_lowerCamelCase ) else: __snake_case : Dict = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) __snake_case : int = sorted(_lowerCamelCase ) _remove_dup_initializers_from_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) __snake_case : str = """optimized_""" + model_file_name __snake_case : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase ) onnx.save(_lowerCamelCase , _lowerCamelCase ) return new_model
13
1
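# Standalone sketch of the deduplication idea in the ONNX script above:
# detect byte-identical initializers, keep the first occurrence, and build
# a map that reroutes every consumer of a duplicate onto the survivor.
# Plain numpy arrays stand in for onnx TensorProtos here.
import numpy as np

inits = {"w0": np.ones(4), "w1": np.ones(4), "w2": np.arange(4)}
replace = {}
names = list(inits)
for i, ni in enumerate(names):
    if ni in replace:
        continue
    for nj in names[i + 1 :]:
        if nj not in replace and np.array_equal(inits[ni], inits[nj]):
            replace[nj] = ni  # inputs named nj should now read ni
print(replace)  # {'w1': 'w0'}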
'''simple docstring''' def _a ( _lowerCamelCase ) -> bool: """simple docstring""" __snake_case : Optional[int] = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _a ( _lowerCamelCase = 5000 ) -> int: """simple docstring""" __snake_case : int = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )] for i, pentagonal_i in enumerate(_lowerCamelCase ): for j in range(_lowerCamelCase , len(_lowerCamelCase ) ): __snake_case : Optional[int] = pentagonal_nums[j] __snake_case : str = pentagonal_i + pentagonal_j __snake_case : List[Any] = pentagonal_j - pentagonal_i if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ): return b return -1 if __name__ == "__main__": print(f"""{solution() = }""")
13
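# Worked check of the inverse-pentagonal test used above. From
# P(n) = n(3n - 1) / 2, solving for n gives n = (1 + sqrt(1 + 24x)) / 6,
# so x is pentagonal exactly when that expression is a whole number.
def is_pentagonal(x: int) -> bool:
    root = (1 + 24 * x) ** 0.5
    return ((1 + root) / 6) % 1 == 0

print([x for x in range(1, 40) if is_pentagonal(x)])  # [1, 5, 12, 22, 35]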
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase = ["small", "medium", "large"] __UpperCamelCase = "lm_head.decoder.weight" __UpperCamelCase = "lm_head.weight" def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : Optional[int] = torch.load(_lowerCamelCase ) __snake_case : Optional[int] = d.pop(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--dialogpt_path", default=".", type=str) __UpperCamelCase = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""") __UpperCamelCase = f"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
13
1
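# Minimal sketch of the state-dict fix-up the conversion script above
# performs: DialoGPT checkpoints keep the LM head under
# "lm_head.decoder.weight", while transformers expects "lm_head.weight".
# Shown on a plain dict.
import torch

state_dict = {"lm_head.decoder.weight": torch.zeros(2, 2)}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
print(sorted(state_dict))  # ['lm_head.weight']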
'''simple docstring''' def _a ( _lowerCamelCase = 1000 ) -> int: """simple docstring""" return sum(e for e in range(3 , _lowerCamelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(f"""{solution() = }""")
13
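# The brute-force sum above can be cross-checked in O(1) with
# inclusion-exclusion: add the multiples of 3 and of 5, subtract the
# multiples of 15 (counted twice), each via the arithmetic-series formula.
def sum_multiples(k: int, below: int) -> int:
    m = (below - 1) // k
    return k * m * (m + 1) // 2

n = 1000
print(sum_multiples(3, n) + sum_multiples(5, n) - sum_multiples(15, n))  # 233168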
'''simple docstring''' __UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def _a ( ) -> None: """simple docstring""" __snake_case : Dict = input("""Enter message: """ ) __snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ ) __snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): __snake_case : Any = """encrypt""" __snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase ) elif mode.lower().startswith("""d""" ): __snake_case : Optional[int] = """decrypt""" __snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase ) print(F'''\n{mode.title()}ed message:''' ) print(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" __snake_case : str = [] __snake_case : Dict = 0 __snake_case : Optional[int] = key.upper() for symbol in message: __snake_case : Any = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(_lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(_lowerCamelCase ): __snake_case : Tuple = 0 else: translated.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) if __name__ == "__main__": main()
13
1
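# Self-contained round-trip of the Vigenère scheme implemented above: shift
# each letter forward by the matching key letter to encrypt, backward to
# decrypt, cycling the key and passing non-letters through unchanged.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def vigenere(message: str, key: str, encrypt: bool) -> str:
    out, k = [], 0
    for ch in message:
        idx = LETTERS.find(ch.upper())
        if idx == -1:
            out.append(ch)
            continue
        shift = LETTERS.find(key[k % len(key)].upper())
        idx = (idx + shift if encrypt else idx - shift) % 26
        out.append(LETTERS[idx].lower() if ch.islower() else LETTERS[idx])
        k += 1
    return "".join(out)

cipher = vigenere("Attack at dawn", "LEMON", True)
print(cipher)                            # Lxfopv ef rnhr
print(vigenere(cipher, "LEMON", False))  # Attack at dawn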
'''simple docstring''' def _a ( _lowerCamelCase , _lowerCamelCase = False ) -> str: """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ): __snake_case : Optional[int] = F'''Expected string as input, found {type(_lowerCamelCase )}''' raise ValueError(_lowerCamelCase ) if not isinstance(_lowerCamelCase , _lowerCamelCase ): __snake_case : Union[str, Any] = F'''Expected boolean as use_pascal parameter, found {type(_lowerCamelCase )}''' raise ValueError(_lowerCamelCase ) __snake_case : List[Any] = input_str.split("""_""" ) __snake_case : List[Any] = 0 if use_pascal else 1 __snake_case : Any = words[start_index:] __snake_case : List[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize] __snake_case : Union[str, Any] = """""" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
13
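# Quick demonstration of the snake_case converter above in both modes, as a
# standalone re-implementation so it runs without the obfuscated helper names.
def to_camel(s: str, use_pascal: bool = False) -> str:
    words = s.split("_")
    start = 0 if use_pascal else 1
    head = "" if use_pascal else words[0]
    return head + "".join(w[0].upper() + w[1:] for w in words[start:])

print(to_camel("some_random_name"))        # someRandomName
print(to_camel("some_random_name", True))  # SomeRandomName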
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : Tuple = value elif weight_type == "bias": __snake_case : str = value else: __snake_case : List[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [] __snake_case : List[Any] = fairseq_model.state_dict() __snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Any = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __snake_case : Dict = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2] __snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase ) if "weight_g" in name: __snake_case : Dict = """weight_g""" elif "weight_v" in name: __snake_case : List[str] = """weight_v""" elif "weight" in name: __snake_case : str = """weight""" elif "bias" in name: __snake_case : int = """bias""" else: __snake_case : int = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = full_name.split("""conv_layers.""" )[-1] __snake_case : Optional[int] = name.split(""".""" ) __snake_case : Dict = int(items[0] ) __snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : List[str] = SEWConfig() if is_finetuned: __snake_case : List[Any] = model.wav_encoder.wav_model.cfg else: __snake_case : Optional[Any] = model.cfg __snake_case : Tuple = fs_config.conv_bias __snake_case : List[Any] = eval(fs_config.conv_feature_layers ) __snake_case : List[Any] = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : Tuple = [x[2] for x in conv_layers] __snake_case : List[str] = """gelu""" __snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" __snake_case : Optional[int] = 0.0 __snake_case : Optional[Any] = fs_config.activation_fn.name __snake_case : Dict = fs_config.encoder_embed_dim __snake_case : Dict = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Tuple = 1E-5 __snake_case : Dict = fs_config.encoder_layerdrop __snake_case : Any = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Tuple = fs_config.conv_pos __snake_case : Optional[int] = len(_lowerCamelCase ) __snake_case : int = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : Union[str, Any] = model.cfg __snake_case : Tuple = fs_config.final_dropout __snake_case : Tuple = fs_config.layerdrop __snake_case : Any = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : List[Any] = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : str = fs_config.mask_channel_length __snake_case : Any = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : str = fs_config.mask_prob __snake_case : str = """Wav2Vec2FeatureExtractor""" __snake_case : Dict = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase ) else: __snake_case : int = convert_config(model[0] , _lowerCamelCase ) __snake_case : Dict = model[0].eval() __snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __snake_case : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , 
do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if is_finetuned: if dict_path: __snake_case : str = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Optional[Any] = target_dict.bos_index __snake_case : Tuple = target_dict.pad_index __snake_case : List[str] = target_dict.bos_index __snake_case : Optional[Any] = target_dict.eos_index __snake_case : List[str] = len(target_dict.symbols ) __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" ) if not os.path.isdir(_lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case : List[str] = SEWForCTC(_lowerCamelCase ) else: __snake_case : List[str] = SEWModel(_lowerCamelCase ) feature_extractor.save_pretrained(_lowerCamelCase ) recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __UpperCamelCase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
13
1
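# Toy illustration of the name-mapping pass in recursively_load_weights
# above: each fairseq parameter name is matched against the MAPPING
# prefixes and rewritten to its transformers counterpart, with "*" standing
# in for the layer index parsed out of the name.
MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

name = "encoder.layers.3.self_attn.k_proj.weight"
for key, mapped_key in MAPPING.items():
    if key in name:
        layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
        print(mapped_key.replace("*", layer_index))
# encoder.layers.3.attention.k_proj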
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _A ( unittest.TestCase ): def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : str = tempfile.mkdtemp() __snake_case : List[Any] = BlipImageProcessor() __snake_case : Union[str, Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) __snake_case : List[Any] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) __snake_case : int = InstructBlipProcessor(__magic_name__ , __magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def lowercase__ ( self : int , **__magic_name__ : Any ) -> str: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def lowercase__ ( self : Any , **__magic_name__ : Union[str, Any] ) -> List[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def lowercase__ ( self : Union[str, Any] , **__magic_name__ : Tuple ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).qformer_tokenizer def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : List[Any] ) -> int: """simple docstring""" __snake_case : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __snake_case : Dict = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" __snake_case : Union[str, Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) __snake_case : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __snake_case : Tuple = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) __snake_case : Union[str, Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) self.assertIsInstance(processor.qformer_tokenizer , __magic_name__ ) def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : Tuple = self.get_image_processor() __snake_case : str = self.get_tokenizer() __snake_case : Optional[Any] = self.get_qformer_tokenizer() __snake_case : Optional[Any] = InstructBlipProcessor( tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ ) __snake_case : int = self.prepare_image_inputs() __snake_case : Dict = image_processor(__magic_name__ , return_tensors="""np""" ) __snake_case : List[Any] = processor(images=__magic_name__ , 
return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Tuple = self.get_image_processor() __snake_case : List[str] = self.get_tokenizer() __snake_case : Any = self.get_qformer_tokenizer() __snake_case : List[Any] = InstructBlipProcessor( tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ ) __snake_case : List[Any] = """lower newer""" __snake_case : Tuple = processor(text=__magic_name__ ) __snake_case : Tuple = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) __snake_case : List[str] = qformer_tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] ) def lowercase__ ( self : int ) -> Optional[int]: """simple docstring""" __snake_case : str = self.get_image_processor() __snake_case : Optional[Any] = self.get_tokenizer() __snake_case : Union[str, Any] = self.get_qformer_tokenizer() __snake_case : Tuple = InstructBlipProcessor( tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ ) __snake_case : List[str] = """lower newer""" __snake_case : Optional[int] = self.prepare_image_inputs() __snake_case : int = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : Dict = self.get_image_processor() __snake_case : List[str] = self.get_tokenizer() __snake_case : int = self.get_qformer_tokenizer() __snake_case : List[str] = InstructBlipProcessor( tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ ) __snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : Optional[Any] = processor.batch_decode(__magic_name__ ) __snake_case : int = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : Dict ) -> str: """simple docstring""" __snake_case : List[str] = self.get_image_processor() __snake_case : Optional[Any] = self.get_tokenizer() __snake_case : int = self.get_qformer_tokenizer() __snake_case : int = InstructBlipProcessor( tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ ) __snake_case : Tuple = """lower newer""" __snake_case : Optional[Any] = self.prepare_image_inputs() __snake_case : Dict = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
13
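# What the fan-out tests above assert, in miniature: a single processor
# call yields the main tokenizer's fields, the Q-Former tokenizer's fields
# under a "qformer_" prefix, and the image processor's "pixel_values",
# merged into one encoding. Plain dicts stand in for the real components.
main = {"input_ids": [[101, 102]], "attention_mask": [[1, 1]]}
qformer = {"input_ids": [[7, 8]], "attention_mask": [[1, 1]]}
merged = {**main, **{f"qformer_{k}": v for k, v in qformer.items()}}
merged["pixel_values"] = [[0.0]]  # placeholder for the processed image
print(sorted(merged))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']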
'''simple docstring''' def _a ( _lowerCamelCase ) -> bool: """simple docstring""" __snake_case : Optional[int] = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _a ( _lowerCamelCase = 5000 ) -> int: """simple docstring""" __snake_case : int = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )] for i, pentagonal_i in enumerate(_lowerCamelCase ): for j in range(_lowerCamelCase , len(_lowerCamelCase ) ): __snake_case : Optional[int] = pentagonal_nums[j] __snake_case : str = pentagonal_i + pentagonal_j __snake_case : List[Any] = pentagonal_j - pentagonal_i if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ): return b return -1 if __name__ == "__main__": print(f"""{solution() = }""")
13
1
'''simple docstring''' def _a ( _lowerCamelCase ) -> list: """simple docstring""" if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) or x < 0 for x in sequence ): raise TypeError("""Sequence must be list of non-negative integers""" ) for _ in range(len(_lowerCamelCase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(_lowerCamelCase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
13
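# One left-to-right sweep of the bead sort above, traced by hand: whenever
# an upper rod holds more beads than the one below it, the surplus falls
# down, so the maximum drifts to the end of the list on each sweep.
seq = [5, 4, 3, 2, 1]
for i in range(len(seq) - 1):
    if seq[i] > seq[i + 1]:
        diff = seq[i] - seq[i + 1]
        seq[i] -= diff
        seq[i + 1] += diff
print(seq)  # [4, 3, 2, 1, 5] -- n sweeps in total leave it fully sorted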
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : List[Any] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __snake_case : int = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __snake_case : Optional[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above __snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __snake_case : Dict = output[output != -float("""inf""" )] __snake_case : Optional[Any] = tf.cast( tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @require_tf class _A ( unittest.TestCase , __lowercase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): lowercase__: Tuple = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" __snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Optional[int] = 2 __snake_case : str = 2 class _A ( tf.Module ): def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Dict = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict: """simple docstring""" __snake_case : Tuple = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : int = [[2, 0], [1_02, 1_03]] __snake_case : Tuple = [[1, 0], [1, 1]] __snake_case : Union[str, Any] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for batch_size in range(1 , len(__magic_name__ ) + 1 ): __snake_case : Union[str, Any] = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } __snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""] __snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case 
: Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Dict = 1 __snake_case : int = 2 class _A ( tf.Module ): def __init__( self : Tuple , __magic_name__ : List[str] ) -> int: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : Union[str, Any] = [[2], [1_02, 1_03]] __snake_case : Tuple = [[1], [1, 1]] __snake_case : List[str] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for input_row in range(len(__magic_name__ ) ): __snake_case : Tuple = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } __snake_case : str = serving_func(**__magic_name__ )["""sequences"""] __snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow @require_tensorflow_text def lowercase__ ( self : Dict ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ ) class _A ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ) -> int: """simple docstring""" super().__init__() __snake_case : Any = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() ) __snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ ) __snake_case , __snake_case : List[Any] = text.pad_model_inputs( __magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ ) return self.tokenizer.detokenize(__magic_name__ ) __snake_case : int = CompleteSentenceTransformer() __snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" ) __snake_case : Tuple = complete_model(__magic_name__ ) __snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ ) keras_model.save(__magic_name__ ) def lowercase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __snake_case : Dict = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } __snake_case : str = 14 __snake_case : str = 
AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : int = """Hello, my dog is cute and""" __snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" ) __snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : List[Any] = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) __snake_case : Dict = [6_38, 1_98] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : str = """Hugging Face is a technology company based in New York and Paris.""" __snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids __snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : int = bart_model.generate(__magic_name__ ).numpy() class _A ( __lowercase ): def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) ) class _A ( bart_model.model.encoder.__class__ ): def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) __snake_case : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __snake_case : Dict = bart_model.generate(__magic_name__ ).numpy() with self.assertRaises(__magic_name__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(__magic_name__ , foo="""bar""" )
13
1
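# Simplified numpy sketch of the top-k/top-p filtering exercised by the
# first test above: keep the k highest logits, then the smallest prefix of
# the sorted distribution whose probability mass stays within p, masking
# everything else to -inf. (The library version also keeps the token that
# crosses the p threshold and honors min_tokens_to_keep; this shows only
# the core idea.)
import numpy as np

def top_k_top_p(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    out = logits.astype(float)
    kth_largest = np.sort(out)[-top_k]
    out[out < kth_largest] = -np.inf        # top-k cut
    order = np.argsort(out)[::-1]           # indices, best first
    probs = np.exp(out[order] - out[order].max())
    probs /= probs.sum()
    keep = np.cumsum(probs) <= top_p
    keep[0] = True                          # never drop the best token
    filtered = np.full_like(out, -np.inf)
    filtered[order[keep]] = out[order[keep]]
    return filtered

print(top_k_top_p(np.array([1.0, 3.0, 2.0, 0.5]), top_k=3, top_p=0.9))
# -> [-inf, 3., -inf, -inf]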
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __UpperCamelCase = logging.get_logger(__name__) # General docstring __UpperCamelCase = "MobileNetV1Config" # Base docstring __UpperCamelCase = "google/mobilenet_v1_1.0_224" __UpperCamelCase = [1, 1024, 7, 7] # Image classification docstring __UpperCamelCase = "google/mobilenet_v1_1.0_224" __UpperCamelCase = "tabby, tabby cat" __UpperCamelCase = [ "google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> int: """simple docstring""" __snake_case : Optional[int] = {} if isinstance(_lowerCamelCase , _lowerCamelCase ): __snake_case : List[Any] = model.mobilenet_va else: __snake_case : int = model __snake_case : Optional[int] = """MobilenetV1/Conv2d_0/""" __snake_case : str = backbone.conv_stem.convolution.weight __snake_case : List[str] = backbone.conv_stem.normalization.bias __snake_case : Union[str, Any] = backbone.conv_stem.normalization.weight __snake_case : str = backbone.conv_stem.normalization.running_mean __snake_case : Any = backbone.conv_stem.normalization.running_var for i in range(13 ): __snake_case : Dict = i + 1 __snake_case : Dict = i * 2 __snake_case : List[str] = backbone.layer[pt_index] __snake_case : Optional[Any] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' __snake_case : Tuple = pointer.convolution.weight __snake_case : Optional[Any] = pointer.normalization.bias __snake_case : List[str] = pointer.normalization.weight __snake_case : str = pointer.normalization.running_mean __snake_case : Optional[Any] = pointer.normalization.running_var __snake_case : Optional[Any] = backbone.layer[pt_index + 1] __snake_case : Union[str, Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' __snake_case : Dict = pointer.convolution.weight __snake_case : str = pointer.normalization.bias __snake_case : Tuple = pointer.normalization.weight __snake_case : List[Any] = pointer.normalization.running_mean __snake_case : List[str] = pointer.normalization.running_var if isinstance(_lowerCamelCase , _lowerCamelCase ): __snake_case : Any = """MobilenetV1/Logits/Conv2d_1c_1x1/""" __snake_case : List[Any] = model.classifier.weight __snake_case : List[Any] = model.classifier.bias return tf_to_pt_map def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see """ """https://www.tensorflow.org/install/ for installation instructions.""" ) raise # Load weights from TF model __snake_case : str = tf.train.list_variables(_lowerCamelCase ) __snake_case : List[Any] = {} for name, shape in init_vars: logger.info(F'''Loading TF weight {name} with shape {shape}''' ) __snake_case : int = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase ) __snake_case : int = array # Build TF to PyTorch weights loading map __snake_case : int = _build_tf_to_pytorch_map(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) for name, pointer in tf_to_pt_map.items(): logger.info(F'''Importing {name}''' ) if name not in tf_weights: logger.info(F'''{name} not in tf pre-trained weights, skipping''' ) continue __snake_case : List[Any] = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""" ) __snake_case : Optional[int] = np.transpose(_lowerCamelCase , (2, 3, 0, 1) ) elif "weights" in name: logger.info("""Transposing""" ) if len(pointer.shape ) == 2: # copying into linear layer __snake_case : Union[str, Any] = array.squeeze().transpose() else: __snake_case : Union[str, Any] = np.transpose(_lowerCamelCase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' ) __snake_case : List[Any] = torch.from_numpy(_lowerCamelCase ) tf_weights.pop(_lowerCamelCase , _lowerCamelCase ) tf_weights.pop(name + """/RMSProp""" , _lowerCamelCase ) tf_weights.pop(name + """/RMSProp_1""" , _lowerCamelCase ) tf_weights.pop(name + """/ExponentialMovingAverage""" , _lowerCamelCase ) logger.info(F'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' ) return model def _a ( _lowerCamelCase , _lowerCamelCase ) -> torch.Tensor: """simple docstring""" __snake_case , __snake_case : Dict = features.shape[-2:] __snake_case , __snake_case : Optional[int] = conv_layer.stride __snake_case , __snake_case : Optional[int] = conv_layer.kernel_size if in_height % stride_height == 0: __snake_case : str = max(kernel_height - stride_height , 0 ) else: __snake_case : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __snake_case : int = max(kernel_width - stride_width , 0 ) else: __snake_case : Any = max(kernel_width - (in_width % stride_width) , 0 ) __snake_case : Dict = pad_along_width // 2 __snake_case : str = pad_along_width - pad_left __snake_case : Tuple = pad_along_height // 2 __snake_case : Any = pad_along_height - pad_top __snake_case : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_lowerCamelCase , _lowerCamelCase , """constant""" , 0.0 ) class _A ( nn.Module ): def __init__( self : int , __magic_name__ : MobileNetVaConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] = 1 , __magic_name__ : Optional[int] = 1 , __magic_name__ : bool = False , __magic_name__ : Optional[bool] = True , __magic_name__ : Optional[bool or str] = True , ) -> None: """simple docstring""" super().__init__() __snake_case : Dict = config if in_channels % groups != 0: raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) __snake_case : Any = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __snake_case : int = nn.Convad( 
in_channels=__magic_name__ , out_channels=__magic_name__ , kernel_size=__magic_name__ , stride=__magic_name__ , padding=__magic_name__ , groups=__magic_name__ , bias=__magic_name__ , padding_mode="""zeros""" , ) if use_normalization: __snake_case : List[str] = nn.BatchNormad( num_features=__magic_name__ , eps=config.layer_norm_eps , momentum=0.9997 , affine=__magic_name__ , track_running_stats=__magic_name__ , ) else: __snake_case : Union[str, Any] = None if use_activation: if isinstance(__magic_name__ , __magic_name__ ): __snake_case : Dict = ACTaFN[use_activation] elif isinstance(config.hidden_act , __magic_name__ ): __snake_case : List[Any] = ACTaFN[config.hidden_act] else: __snake_case : Optional[Any] = config.hidden_act else: __snake_case : str = None def lowercase__ ( self : str , __magic_name__ : torch.Tensor ) -> torch.Tensor: """simple docstring""" if self.config.tf_padding: __snake_case : List[Any] = apply_tf_padding(__magic_name__ , self.convolution ) __snake_case : Any = self.convolution(__magic_name__ ) if self.normalization is not None: __snake_case : Dict = self.normalization(__magic_name__ ) if self.activation is not None: __snake_case : Union[str, Any] = self.activation(__magic_name__ ) return features class _A ( __lowercase ): lowercase__: Tuple = MobileNetVaConfig lowercase__: int = load_tf_weights_in_mobilenet_va lowercase__: Union[str, Any] = '''mobilenet_v1''' lowercase__: Any = '''pixel_values''' lowercase__: Optional[int] = False def lowercase__ ( self : Optional[Any] , __magic_name__ : Union[nn.Linear, nn.Convad] ) -> None: """simple docstring""" if isinstance(__magic_name__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__magic_name__ , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __UpperCamelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __UpperCamelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , __lowercase , ) class _A ( __lowercase ): def __init__( self : Optional[Any] , __magic_name__ : MobileNetVaConfig , __magic_name__ : bool = True ) -> Optional[int]: """simple docstring""" super().__init__(__magic_name__ ) __snake_case : Any = config __snake_case : Optional[int] = 32 __snake_case : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth ) __snake_case : Dict = MobileNetVaConvLayer( __magic_name__ , in_channels=config.num_channels , out_channels=__magic_name__ , kernel_size=3 , stride=2 , ) __snake_case : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __snake_case : Any = nn.ModuleList() for i in range(13 ): __snake_case : List[str] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __snake_case : str = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __magic_name__ , in_channels=__magic_name__ , out_channels=__magic_name__ , kernel_size=3 , stride=strides[i] , groups=__magic_name__ , ) ) self.layer.append( MobileNetVaConvLayer( __magic_name__ , in_channels=__magic_name__ , out_channels=__magic_name__ , kernel_size=1 , ) ) __snake_case : str = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowercase__ ( self : Optional[Any] , __magic_name__ : int ) -> int: """simple docstring""" raise NotImplementedError @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowercase__ ( self : List[str] , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: """simple docstring""" __snake_case : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) __snake_case : List[Any] = self.conv_stem(__magic_name__ ) __snake_case : Union[str, Any] = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __snake_case : List[Any] = layer_module(__magic_name__ ) if output_hidden_states: __snake_case : Any = all_hidden_states + (hidden_states,) __snake_case : List[str] = hidden_states if self.pooler is not None: __snake_case : Optional[int] = torch.flatten(self.pooler(__magic_name__ ) , start_dim=1 ) else: __snake_case : Union[str, Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=__magic_name__ , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , __lowercase , ) class _A ( __lowercase ): def __init__( self : List[str] , __magic_name__ : MobileNetVaConfig ) -> None: """simple docstring""" super().__init__(__magic_name__ ) __snake_case : Union[str, Any] = config.num_labels __snake_case : Optional[Any] = MobileNetVaModel(__magic_name__ ) __snake_case : str = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __snake_case : str = nn.Dropout(config.classifier_dropout_prob , inplace=__magic_name__ ) __snake_case : Dict = nn.Linear(__magic_name__ , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowercase__ ( self : List[str] , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" __snake_case : Tuple = return_dict if return_dict is not None else self.config.use_return_dict __snake_case : int = self.mobilenet_va(__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ ) __snake_case : Dict = outputs.pooler_output if return_dict else outputs[1] __snake_case : Any = self.classifier(self.dropout(__magic_name__ ) ) __snake_case : List[str] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __snake_case : Dict = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __snake_case : List[str] = """single_label_classification""" else: __snake_case : Tuple = """multi_label_classification""" if self.config.problem_type == "regression": __snake_case : Optional[Any] = MSELoss() if self.num_labels == 1: __snake_case : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __snake_case : Tuple = loss_fct(__magic_name__ , __magic_name__ ) elif self.config.problem_type == "single_label_classification": __snake_case : Optional[int] = CrossEntropyLoss() __snake_case : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __snake_case : List[str] = BCEWithLogitsLoss() __snake_case : str = loss_fct(__magic_name__ , __magic_name__ ) if not return_dict: __snake_case : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states , )
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value, because that would be a collision in the
        # vertical direction. Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective variables
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
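# The two diagonal formulas above are worth seeing on concrete numbers. This
# hedged sketch (the helper name and sample coordinates are invented here for
# illustration, not part of the original file) checks whether two queens share a
# diagonal using exactly the row - col / row + col invariants.
def _queens_share_a_diagonal(qr: int, qc: int, row: int, col: int) -> bool:
    # Two squares share the 45º diagonal iff row - col matches, and the
    # 135º diagonal iff row + col matches.
    return qr - qc == row - col or qr + qc == row + col


if __name__ == "__main__":
    # (0, 1) and (2, 3) sit on the same 45º diagonal: 0 - 1 == 2 - 3.
    assert _queens_share_a_diagonal(0, 1, 2, 3)
    # (0, 1) and (1, 0) sit on the same 135º diagonal: 0 + 1 == 1 + 0.
    assert _queens_share_a_diagonal(0, 1, 1, 0)
    # (0, 0) and (1, 2) share no diagonal (they are a knight's move apart).
    assert not _queens_share_a_diagonal(0, 0, 1, 2)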
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __UpperCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __UpperCamelCase = TaTokenizerFast __UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __UpperCamelCase = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCamelCase = logging.getLogger(__name__) class _A ( __lowercase ): def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" super().__init__( __magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , ) __snake_case : List[str] = None def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]: """simple docstring""" logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __snake_case : List[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __snake_case : List[str] = str(distributed_port + 1 ) __snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowercase__ ( self : int ) -> int: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ ) dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group ) return target_tensor def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : int = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ ) return ifname def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): __snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ ) # distributed training __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group ) # gather logic __snake_case : Tuple = None if self._is_main(): __snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )] dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group ) # scatter logic __snake_case : Optional[int] = question_hidden_states.shape[0] __snake_case : Optional[Any] = [] __snake_case : Any = [] if self._is_main(): assert len(__magic_name__ ) == world_size __snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ ) __snake_case , __snake_case : Tuple = 
torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa ) __snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that are not explicitly "integration" or "unit" as unit tests
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into the pytest base temp directory
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Do not count test runs in the Hub download statistics
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Silence the SQLAlchemy 2.0 deprecation warning in tests that request this fixture
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from version_str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the str version from the (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
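# A quick, hedged usage sketch for the Version dataclass above (the literal
# version strings are illustrative, not from the original file): total_ordering
# plus the manual __eq__/__lt__ means versions compare like their
# (major, minor, patch) tuples, and str operands are coerced via _validate_operand.
if __name__ == "__main__":
    v = Version("1.10.2")
    assert v.tuple == (1, 10, 2)
    assert v == "1.10.2"  # str operands are parsed before comparing
    assert Version("1.2.0") < Version("1.10.0")  # numeric, not lexicographic
    print(repr(v))  # -> 1.10.2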
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU


from .scripts import test_script, test_sync, test_ops  # isort: skip
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros until the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    # Split the padded string into groups of 3 bits
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # Each 3-bit group maps to a single octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
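# A hedged usage sketch for the converter above (the sample values are mine, not
# from the original file): each 3-bit group maps to one octal digit, so
# 0b1111 pads to 001 111 and converts to "17".
if __name__ == "__main__":
    assert bin_to_octal("1111") == "17"  # 15 decimal
    assert bin_to_octal("101010") == "52"  # 42 decimal
    assert bin_to_octal("0") == "0"
    print(bin_to_octal("110100"))  # -> 64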
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
def count_inversions_bf(arr):
    # O(n^2) brute force: count every out-of-order pair
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    # O(n log n) divide and conquer: sort while counting inversions
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_2 = []
    num_inversions_bf = count_inversions_bf(arr_2)
    _, num_inversions_recursive = count_inversions_recursive(arr_2)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
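# A hedged sanity-check sketch (the sample data is chosen here, not taken from
# the original file): the O(n^2) brute force and the O(n log n) merge-based
# count must agree, and the merge-based variant also returns the sorted array
# it built along the way.
if __name__ == "__main__":
    sample = [3, 1, 2]
    sorted_sample, fast_count = count_inversions_recursive(sample)
    assert count_inversions_bf(sample) == fast_count == 2  # (3, 1) and (3, 2)
    assert sorted_sample == [1, 2, 3]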
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from __future__ import annotations


def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    # Every contiguous slice of length ngram_size is one character n-gram
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
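# A hedged usage sketch (the examples are mine, not from the original file):
# a sentence of length L yields L - ngram_size + 1 character n-grams, and a
# window longer than the sentence yields an empty list.
if __name__ == "__main__":
    assert create_ngram("abcde", 2) == ["ab", "bc", "cd", "de"]
    assert create_ngram("abc", 3) == ["abc"]
    assert create_ngram("ab", 5) == []  # shorter than the window -> empty list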
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
'''simple docstring''' import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class _A ( __lowercase ): def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : int = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def lowercase__ ( self : Any ) -> Dict: """simple docstring""" with self.assertRaises(__magic_name__ ): __snake_case : Any = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def lowercase__ ( self : Any ) -> str: """simple docstring""" with self.assertRaises(__magic_name__ ): __snake_case : int = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : List[Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def lowercase__ ( self : str ) -> int: """simple docstring""" with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): __snake_case : List[str] = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __snake_case : Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def lowercase__ ( self : List[str] ) -> Dict: """simple docstring""" __snake_case : List[str] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def lowercase__ ( self : Any ) -> Dict: """simple docstring""" with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): __snake_case : Any = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" __snake_case : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" import PIL.Image __snake_case : List[Any] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=__magic_name__ ) as mock_cast_to_python_objects: __snake_case : Dict = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) ) __snake_case , __snake_case : Tuple = mock_cast_to_python_objects.call_args_list[-1] 
self.assertIn("""optimize_list_casting""" , __magic_name__ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : List[Any] = pa.BufferReader(_lowerCamelCase ) if isinstance(_lowerCamelCase , pa.Buffer ) else pa.memory_map(_lowerCamelCase ) __snake_case : int = pa.ipc.open_stream(_lowerCamelCase ) __snake_case : pa.Table = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" __snake_case : int = pa.BufferOutputStream() __snake_case : Optional[int] = pa.schema(_lowerCamelCase ) if fields else None with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) __snake_case , __snake_case : List[str] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: __snake_case : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _a ( ) -> int: """simple docstring""" __snake_case : Optional[int] = pa.BufferOutputStream() __snake_case : Dict = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=_lowerCamelCase , features=_lowerCamelCase ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) __snake_case , __snake_case : Optional[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata __snake_case : Optional[Any] = pa.BufferReader(output.getvalue() ) __snake_case : Optional[int] = pa.ipc.open_stream(_lowerCamelCase ) __snake_case : pa.Table = f.read_all() __snake_case : Optional[int] = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(_lowerCamelCase ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def _a ( _lowerCamelCase ) -> str: """simple docstring""" __snake_case : int = pa.BufferOutputStream() with ArrowWriter( stream=_lowerCamelCase , writer_batch_size=_lowerCamelCase , hash_salt="""split_name""" , check_duplicates=_lowerCamelCase , ) as writer: with pytest.raises(_lowerCamelCase ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) __snake_case , __snake_case : Tuple = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Any = pa.BufferOutputStream() with ArrowWriter( stream=_lowerCamelCase , writer_batch_size=_lowerCamelCase , hash_salt="""split_name""" , check_duplicates=_lowerCamelCase , ) as writer: with pytest.raises(_lowerCamelCase ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", 
"""col_2""": 2} , key=10 ) __snake_case , __snake_case : List[str] = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def _a ( _lowerCamelCase ) -> str: """simple docstring""" __snake_case : int = pa.BufferOutputStream() with ArrowWriter( stream=_lowerCamelCase , writer_batch_size=_lowerCamelCase , hash_salt="""split_name""" , check_duplicates=_lowerCamelCase , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) __snake_case , __snake_case : List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: """simple docstring""" __snake_case : str = pa.BufferOutputStream() __snake_case : Any = pa.schema(_lowerCamelCase ) if fields else None with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) __snake_case , __snake_case : int = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: __snake_case : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" __snake_case : int = pa.BufferOutputStream() __snake_case : List[str] = pa.schema(_lowerCamelCase ) if fields else None with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) __snake_case , __snake_case : Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: __snake_case : str = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Optional[int] = pa.BufferOutputStream() __snake_case : str = pa.schema(_lowerCamelCase ) if fields else None with ArrowWriter(stream=_lowerCamelCase , schema=_lowerCamelCase , writer_batch_size=_lowerCamelCase ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) 
__snake_case , __snake_case : int = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: __snake_case : Optional[int] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _a ( ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __snake_case : Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} __snake_case : List[str] = os.path.join(_lowerCamelCase , """test.arrow""" ) with ArrowWriter(path=_lowerCamelCase , schema=pa.schema(_lowerCamelCase ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) __snake_case , __snake_case : Tuple = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(_lowerCamelCase , metadata=writer._schema.metadata ) _check_output(_lowerCamelCase , 1 ) def _a ( _lowerCamelCase ) -> Optional[Any]: """simple docstring""" if pa.types.is_list(_lowerCamelCase ): return get_base_dtype(arr_type.value_type ) else: return arr_type def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict: """simple docstring""" if isinstance(lst[0] , _lowerCamelCase ): change_first_primitive_element_in_list(lst[0] , _lowerCamelCase ) else: __snake_case : int = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : List[Any] = pa.array(TypedSequence(_lowerCamelCase , optimized_int_type=_lowerCamelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" __snake_case : str = pa.array(OptimizedTypedSequence(_lowerCamelCase , col=_lowerCamelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications __snake_case : Optional[int] = copy.deepcopy(_lowerCamelCase ) __snake_case : Dict = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(_lowerCamelCase , _lowerCamelCase ) __snake_case : Dict = pa.array(OptimizedTypedSequence(_lowerCamelCase , col=_lowerCamelCase ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : int = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=_lowerCamelCase ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" __snake_case : int = """mock://dataset-train.arrow""" with ArrowWriter(path=_lowerCamelCase , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(_lowerCamelCase ) ) 
assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) __snake_case , __snake_case : Dict = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(_lowerCamelCase ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : str = pa.BufferOutputStream() with ParquetWriter(stream=_lowerCamelCase ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) __snake_case , __snake_case : List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 __snake_case : str = pa.BufferReader(output.getvalue() ) __snake_case : pa.Table = pq.read_table(_lowerCamelCase ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: """simple docstring""" import PIL.Image __snake_case : Optional[Any] = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_lowerCamelCase , format="""png""" ) __snake_case : Union[str, Any] = pa.BufferOutputStream() with ParquetWriter( stream=_lowerCamelCase , features=Features({"""image""": Image()} ) , embed_local_files=_lowerCamelCase ) as writer: writer.write({"""image""": image_path} ) writer.finalize() __snake_case : Any = pa.BufferReader(output.getvalue() ) __snake_case : pa.Table = pq.read_table(_lowerCamelCase ) __snake_case : Union[str, Any] = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , _lowerCamelCase ) with open(_lowerCamelCase , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def _a ( ) -> str: """simple docstring""" __snake_case : List[Any] = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_lowerCamelCase )] ) __snake_case : Dict = pa.BufferOutputStream() with ArrowWriter(stream=_lowerCamelCase ) as writer: writer._build_writer(inferred_schema=_lowerCamelCase ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
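# Usage sketch (added for illustration; assumes the `transformers` package is
# installed — this module itself uses relative imports, so import the public
# class rather than running the file directly):
#
#     from transformers import DinatConfig
#
#     config = DinatConfig()
#     config.hidden_size   # 512 == embed_dim * 2 ** (num_stages - 1) = 64 * 2**3
#     config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']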
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a single rolling row of Pascal's triangle."""
    c = [0 for _ in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # Compute the current row from the previous row, right to left so each
        # c[j - 1] still holds the previous row's value when it is read.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
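# Quick sanity check (added for illustration; not in the original file): the
# rolling-row result should agree with Python's built-in math.comb.
import math

for n, r in [(5, 2), (10, 5), (20, 7)]:
    assert binomial_coefficient(n, r) == math.comb(n, r)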
def count_inversions_bf(arr):
    """Count inversions with the O(n^2) brute-force double loop."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) with a merge-sort style divide and conquer."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)

    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` in [a, b] by bisection, to ~1e-7 precision."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the endpoints a or b is already a root
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # If neither endpoint is a root and both values share a sign,
        # this algorithm can't find a root in the interval.
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precise to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
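    # Extra check (added for illustration; not in the original file): the root
    # of x**2 - 4 on [0, 3] is 2, and bisection should land within tolerance.
    assert abs(bisection(lambda x: x**2 - 4, 0, 3) - 2) < 1e-6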
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
from collections.abc import Callable


class Heap:
    """A generic heap; pass a key function to control the ordering."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Swaps two heap slots, keeping the position map consistent."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the scores of two items."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap upward from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap downward from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item, or None if the heap is empty."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns and removes the top item, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Doctest exercises for Heap (elided in this copy of the file)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
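    # Usage sketch (added for illustration; not part of the original module).
    # The default key makes this a max-heap on the item's value.
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]
    h.update_item(7, 21)  # demote the old top
    assert h.get_top() == [5, 34]
    assert h.extract_top() == [5, 34]
    assert h.get_top() == [6, 31]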
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
13
1
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, every row already holds a queen, so the
    # current board (possible_board) is a complete solution
    if row == n:
        # We convert possible_board, which looks like [1, 3, 0, 2], into its
        # printable form: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First check that the column is not already taken by another queen in the
        # current board (possible_board); if it is, there is a vertical collision.
        # Then apply the two diagonal formulas:
        #
        #  45º: y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # and verify that neither result already exists in
        # diagonal_right_collisions / diagonal_left_collisions respectively.
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If all checks are False we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
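# Usage sketch (added for illustration; assumes the `transformers` package is
# installed, and that the exact past-key-value input names are produced by
# `fill_with_past_key_values_`):
#
#     from transformers import CodeGenConfig
#     from transformers.models.codegen.configuration_codegen import CodeGenOnnxConfig
#
#     onnx_config = CodeGenOnnxConfig(CodeGenConfig(), use_past=True)
#     list(onnx_config.inputs)
#     # roughly: ['input_ids', 'past_key_values.0.key', 'past_key_values.0.value',
#     #           ..., 'attention_mask']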
'''simple docstring''' from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run __UpperCamelCase = True except (ImportError, AttributeError): __UpperCamelCase = object def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> Any: """simple docstring""" pass __UpperCamelCase = False __UpperCamelCase = logging.get_logger("transformers-cli/serving") def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" __snake_case : Tuple = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(_lowerCamelCase , args.host , args.port , args.workers ) class _A ( __lowercase ): lowercase__: dict class _A ( __lowercase ): lowercase__: List[str] lowercase__: Optional[List[int]] class _A ( __lowercase ): lowercase__: str class _A ( __lowercase ): lowercase__: Any class _A ( __lowercase ): @staticmethod def lowercase__ ( __magic_name__ : ArgumentParser ) -> Dict: """simple docstring""" __snake_case : int = parser.add_parser( """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" ) serve_parser.add_argument( """--task""" , type=__magic_name__ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , ) serve_parser.add_argument("""--host""" , type=__magic_name__ , default="""localhost""" , help="""Interface the server will listen on.""" ) serve_parser.add_argument("""--port""" , type=__magic_name__ , default=88_88 , help="""Port the serving will listen to.""" ) serve_parser.add_argument("""--workers""" , type=__magic_name__ , default=1 , help="""Number of http workers""" ) serve_parser.add_argument("""--model""" , type=__magic_name__ , help="""Model's name or path to stored model.""" ) serve_parser.add_argument("""--config""" , type=__magic_name__ , help="""Model's config name or path to stored model.""" ) serve_parser.add_argument("""--tokenizer""" , type=__magic_name__ , help="""Tokenizer name to use.""" ) serve_parser.add_argument( """--device""" , type=__magic_name__ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , ) serve_parser.set_defaults(func=__magic_name__ ) def __init__( self : Any , __magic_name__ : Pipeline , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> Optional[Any]: """simple docstring""" __snake_case : Dict = pipeline __snake_case : str = host __snake_case : List[Any] = port __snake_case : Any = workers if not _serve_dependencies_installed: raise RuntimeError( """Using serve command requires FastAPI and uvicorn. 
""" """Please install transformers with [serving]: pip install \"transformers[serving]\".""" """Or install FastAPI and uvicorn separately.""" ) else: logger.info(f'''Serving model over {host}:{port}''' ) __snake_case : int = FastAPI( routes=[ APIRoute( """/""" , self.model_info , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""GET"""] , ), APIRoute( """/tokenize""" , self.tokenize , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""POST"""] , ), APIRoute( """/detokenize""" , self.detokenize , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""POST"""] , ), APIRoute( """/forward""" , self.forward , response_model=__magic_name__ , response_class=__magic_name__ , methods=["""POST"""] , ), ] , timeout=6_00 , ) def lowercase__ ( self : int ) -> Dict: """simple docstring""" run(self._app , host=self.host , port=self.port , workers=self.workers ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def lowercase__ ( self : Any , __magic_name__ : str = Body(__magic_name__ , embed=__magic_name__ ) , __magic_name__ : bool = Body(__magic_name__ , embed=__magic_name__ ) ) -> Any: """simple docstring""" try: __snake_case : Union[str, Any] = self._pipeline.tokenizer.tokenize(__magic_name__ ) if return_ids: __snake_case : Tuple = self._pipeline.tokenizer.convert_tokens_to_ids(__magic_name__ ) return ServeTokenizeResult(tokens=__magic_name__ , tokens_ids=__magic_name__ ) else: return ServeTokenizeResult(tokens=__magic_name__ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(__magic_name__ )} ) def lowercase__ ( self : Any , __magic_name__ : List[int] = Body(__magic_name__ , embed=__magic_name__ ) , __magic_name__ : bool = Body(__magic_name__ , embed=__magic_name__ ) , __magic_name__ : bool = Body(__magic_name__ , embed=__magic_name__ ) , ) -> List[Any]: """simple docstring""" try: __snake_case : Dict = self._pipeline.tokenizer.decode(__magic_name__ , __magic_name__ , __magic_name__ ) return ServeDeTokenizeResult(model="""""" , text=__magic_name__ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(__magic_name__ )} ) async def lowercase__ ( self : Dict , __magic_name__ : Union[str, Any]=Body(__magic_name__ , embed=__magic_name__ ) ) -> Optional[int]: """simple docstring""" if len(__magic_name__ ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model __snake_case : Union[str, Any] = self._pipeline(__magic_name__ ) return ServeForwardResult(output=__magic_name__ ) except Exception as e: raise HTTPException(5_00 , {"""error""": str(__magic_name__ )} )
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyImgaImgPipeline lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] lowercase__: int = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowercase__: List[Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: Any = False @property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return 32 @property def lowercase__ ( self : str ) -> str: """simple docstring""" return 32 @property def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __snake_case : Tuple = MultilingualCLIP(__magic_name__ ) __snake_case : Optional[Any] = text_encoder.eval() return text_encoder @property def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return { "block_out_channels": [32, 64], 
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Tuple = self.dummy_text_encoder __snake_case : Dict = self.dummy_tokenizer __snake_case : Dict = self.dummy_unet __snake_case : int = self.dummy_movq __snake_case : List[Any] = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case : Dict = DDIMScheduler(**__magic_name__ ) __snake_case : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str: """simple docstring""" __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Dict = """cpu""" __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : Any = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' 
expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __snake_case : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __snake_case : List[Any] = """A red cartoon frog, 4k""" __snake_case : str = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __snake_case : Any = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case , __snake_case : Optional[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : List[str] = pipeline( __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) __snake_case : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __UpperCamelCase = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _A ( datasets.BuilderConfig ): lowercase__: Optional[datasets.Features] = None def _a ( _lowerCamelCase , _lowerCamelCase , ) -> Optional[int]: """simple docstring""" import pyspark def generate_fn(): __snake_case : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: __snake_case : List[str] = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" ) __snake_case : Any = partition_df.collect() __snake_case : int = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class _A ( _BaseExamplesIterable ): def __init__( self : Union[str, Any] , __magic_name__ : "pyspark.sql.DataFrame" , __magic_name__ : Union[str, Any]=None , ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = df __snake_case : Tuple = partition_order or range(self.df.rdd.getNumPartitions() ) __snake_case : Any = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : str ) -> int: """simple docstring""" yield from self.generate_examples_fn() def lowercase__ ( self : Optional[Any] , __magic_name__ : np.random.Generator ) -> "SparkExamplesIterable": """simple docstring""" __snake_case : Any = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__magic_name__ ) return SparkExamplesIterable(self.df , partition_order=__magic_name__ ) def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : int ) -> "SparkExamplesIterable": """simple docstring""" __snake_case : int = self.split_shard_indices_by_worker(__magic_name__ , __magic_name__ ) return SparkExamplesIterable(self.df , partition_order=__magic_name__ ) @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return len(self.partition_order ) class _A ( datasets.DatasetBuilder ): lowercase__: Optional[int] = SparkConfig def __init__( self : Union[str, Any] , __magic_name__ : "pyspark.sql.DataFrame" , __magic_name__ : str = None , __magic_name__ : str = None , **__magic_name__ : int , ) -> str: """simple docstring""" import pyspark __snake_case : Optional[Any] = pyspark.sql.SparkSession.builder.getOrCreate() __snake_case : Optional[int] = df __snake_case : Dict = working_dir super().__init__( cache_dir=__magic_name__ , config_name=str(self.df.semanticHash() ) , **__magic_name__ , ) def lowercase__ ( self : Any ) -> List[str]: """simple docstring""" def create_cache_and_write_probe(__magic_name__ : Tuple ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=__magic_name__ ) __snake_case : List[Any] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. 
open(__magic_name__ , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: __snake_case : str = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__magic_name__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def lowercase__ ( self : Any , __magic_name__ : datasets.download.download_manager.DownloadManager ) -> str: """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowercase__ ( self : str , __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" import pyspark def get_arrow_batch_size(__magic_name__ : int ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) __snake_case : List[str] = self.df.count() __snake_case : int = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __snake_case : Tuple = ( self.df.limit(__magic_name__ ) .repartition(1 ) .mapInArrow(__magic_name__ , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) __snake_case : Optional[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __snake_case : str = min(__magic_name__ , int(approx_total_size / max_shard_size ) ) __snake_case : Optional[int] = self.df.repartition(__magic_name__ ) def lowercase__ ( self : Tuple , __magic_name__ : str , __magic_name__ : str , __magic_name__ : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: """simple docstring""" import pyspark __snake_case : str = ParquetWriter if file_format == """parquet""" else ArrowWriter __snake_case : Tuple = os.path.join(self._working_dir , os.path.basename(__magic_name__ ) ) if self._working_dir else fpath __snake_case : List[Any] = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __snake_case : Union[str, Any] = self.config.features __snake_case : Dict = self._writer_batch_size __snake_case : str = self._fs.storage_options def write_arrow(__magic_name__ : Tuple ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __snake_case : Optional[Any] = pyspark.TaskContext().taskAttemptId() __snake_case : Union[str, Any] = next(__magic_name__ , __magic_name__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) __snake_case : Any = 0 __snake_case : List[str] = writer_class( features=__magic_name__ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=__magic_name__ , storage_options=__magic_name__ , embed_local_files=__magic_name__ , ) __snake_case : Union[str, Any] = pa.Table.from_batches([first_batch] ) writer.write_table(__magic_name__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __snake_case , __snake_case : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 __snake_case : int = writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=__magic_name__ , storage_options=__magic_name__ , embed_local_files=__magic_name__ , ) __snake_case : Optional[Any] = pa.Table.from_batches([batch] ) writer.write_table(__magic_name__ ) if writer._num_bytes > 0: __snake_case , __snake_case : List[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__magic_name__ ) ): __snake_case : Union[str, Any] = os.path.join(os.path.dirname(__magic_name__ ) , os.path.basename(__magic_name__ ) ) shutil.move(__magic_name__ , __magic_name__ ) __snake_case : Optional[int] = ( self.df.mapInArrow(__magic_name__ , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowercase__ ( self : List[str] , __magic_name__ : "datasets.SplitGenerator" , __magic_name__ : str = "arrow" , __magic_name__ : Optional[Union[str, int]] = None , __magic_name__ : Optional[int] = None , **__magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" self._validate_cache_dir() __snake_case : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__magic_name__ ) __snake_case : str = not is_remote_filesystem(self._fs ) __snake_case : Optional[Any] = os.path.join if is_local else posixpath.join __snake_case : int = """-TTTTT-SSSSS-of-NNNNN""" __snake_case : int = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' __snake_case : str = path_join(self._output_dir , __magic_name__ ) __snake_case : Any = 0 __snake_case : List[Any] = 0 __snake_case : Optional[int] = 0 __snake_case : Union[str, Any] = [] __snake_case : str = [] for task_id, content in self._prepare_split_single(__magic_name__ , __magic_name__ , __magic_name__ ): ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Optional[Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes 
total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__magic_name__ ) __snake_case : Optional[int] = total_num_examples __snake_case : int = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: __snake_case : Dict = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. __snake_case : Any = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , ): rename( __magic_name__ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , ) __snake_case : int = [] __snake_case : Optional[Any] = 0 for i in range(len(__magic_name__ ) ): __snake_case , __snake_case : Tuple = task_id_and_num_shards[i] for shard_id in range(__magic_name__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__magic_name__ , len(__magic_name__ ) ).map(lambda __magic_name__ : _rename_shard(*__magic_name__ ) ).collect() else: # don't use any pattern __snake_case : Optional[int] = 0 __snake_case : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(__magic_name__ , """""" ) , ) def lowercase__ ( self : Tuple , __magic_name__ : "datasets.SplitGenerator" , ) -> SparkExamplesIterable: """simple docstring""" return SparkExamplesIterable(self.df )
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
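# Illustrative round trip with the fast tokenizer above (assumes network access to the
# facebook/bart-base checkpoint; not part of the original module):
tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
encoded = tokenizer("Hello world")
print(tokenizer.decode(encoded["input_ids"]))  # "<s>Hello world</s>"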
def partition(m: int) -> int:
    # Count the partitions of m with a bottom-up dynamic programming table.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
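# Quick sanity check for the partition function above (not part of the original file):
# p(5) = 7 (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1) and p(7) = 15.
assert partition(5) == 7
assert partition(7) == 15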
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    # Recurse into the subgraphs carried by control-flow nodes.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    # Load the model, find pairwise-identical initializers, rewire all uses to one
    # representative, drop the duplicates and save an "optimized_" copy next to the input.
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
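# Illustrative call of the deduplication helper above; "model.onnx" is a placeholder
# path assumed for the example, not a file shipped with the original script.
optimized_path = remove_dup_initializers("model.onnx")
print("optimized model written to", optimized_path)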
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
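# Minimal sketch of the config above in use; the arguments shown simply repeat the
# defaults, so this is equivalent to VivitConfig():
config = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
assert config.model_type == "vivit" and config.hidden_size == 768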
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # Rename the LM head key so the checkpoint matches the transformers state dict.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
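# Hypothetical invocation (the script filename below is assumed for illustration);
# it expects small_ft.pkl, medium_ft.pkl and large_ft.pkl under --dialogpt_path:
#   python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_checkpoints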
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Minimum cost to cover all travel days from `index` to the end of the year.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
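# Worked example for the memoised ticket solver above: travel days [1, 4, 6, 7, 8, 20]
# with pass costs [2, 7, 15] (1-day, 7-day, 30-day) should cost 11 in total
# (a 1-day pass for day 1, a 7-day pass covering days 4-8, a 1-day pass for day 20).
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11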
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance through the key only on letters; wrap around at the end.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through unchanged.
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
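# Round-trip sanity check for the Vigenere helpers above; the key and plaintext are
# arbitrary illustrative values. Case and punctuation survive the round trip because
# translate_message leaves non-letters untouched.
ciphertext = encrypt_message("HELLO", "Attack at dawn")
assert decrypt_message("HELLO", ciphertext) == "Attack at dawn"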
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
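# The trailing consistency check above is easy to trip; a short sketch of the failure
# mode (two conv kernels declared while num_conv_layers stays at its default of 1):
try:
    MCTCTConfig(conv_kernel=(7, 7), num_conv_layers=1)
except ValueError as err:
    print(err)  # "Configuration for convolutional module is incorrect. ..."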
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : Tuple = value elif weight_type == "bias": __snake_case : str = value else: __snake_case : List[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [] __snake_case : List[Any] = fairseq_model.state_dict() __snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Any = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __snake_case : Dict = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2] __snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase ) if "weight_g" in name: __snake_case : Dict = """weight_g""" elif "weight_v" in name: __snake_case : List[str] = """weight_v""" elif "weight" in name: __snake_case : str = """weight""" elif "bias" in name: __snake_case : int = """bias""" else: __snake_case : int = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = full_name.split("""conv_layers.""" )[-1] __snake_case : Optional[int] = name.split(""".""" ) __snake_case : Dict = int(items[0] ) __snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : List[str] = SEWConfig() if is_finetuned: __snake_case : List[Any] = model.wav_encoder.wav_model.cfg else: __snake_case : Optional[Any] = model.cfg __snake_case : Tuple = fs_config.conv_bias __snake_case : List[Any] = eval(fs_config.conv_feature_layers ) __snake_case : List[Any] = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : Tuple = [x[2] for x in conv_layers] __snake_case : List[str] = """gelu""" __snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" __snake_case : Optional[int] = 0.0 __snake_case : Optional[Any] = fs_config.activation_fn.name __snake_case : Dict = fs_config.encoder_embed_dim __snake_case : Dict = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Tuple = 1E-5 __snake_case : Dict = fs_config.encoder_layerdrop __snake_case : Any = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Tuple = fs_config.conv_pos __snake_case : Optional[int] = len(_lowerCamelCase ) __snake_case : int = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : Union[str, Any] = model.cfg __snake_case : Tuple = fs_config.final_dropout __snake_case : Tuple = fs_config.layerdrop __snake_case : Any = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : List[Any] = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : str = fs_config.mask_channel_length __snake_case : Any = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : str = fs_config.mask_prob __snake_case : str = """Wav2Vec2FeatureExtractor""" __snake_case : Dict = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase ) else: __snake_case : int = convert_config(model[0] , _lowerCamelCase ) __snake_case : Dict = model[0].eval() __snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __snake_case : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , 
do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if is_finetuned: if dict_path: __snake_case : str = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Optional[Any] = target_dict.bos_index __snake_case : Tuple = target_dict.pad_index __snake_case : List[str] = target_dict.bos_index __snake_case : Optional[Any] = target_dict.eos_index __snake_case : List[str] = len(target_dict.symbols ) __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" ) if not os.path.isdir(_lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case : List[str] = SEWForCTC(_lowerCamelCase ) else: __snake_case : List[str] = SEWModel(_lowerCamelCase ) feature_extractor.save_pretrained(_lowerCamelCase ) recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __UpperCamelCase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class _A ( unittest.TestCase ): def __init__( self : Optional[int] , __magic_name__ : Any , __magic_name__ : Tuple=13 , __magic_name__ : str=7 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , __magic_name__ : int=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=99 , __magic_name__ : List[str]=32 , __magic_name__ : Optional[Any]=5 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : int="gelu" , __magic_name__ : str=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Dict=5_12 , __magic_name__ : List[Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.02 , __magic_name__ : Optional[Any]=4 , ) -> Dict: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : Any = seq_length __snake_case : Optional[int] = is_training __snake_case : str = use_attention_mask __snake_case : List[str] = use_token_type_ids __snake_case : Any = use_labels __snake_case : Optional[Any] = vocab_size __snake_case : List[str] = hidden_size __snake_case : str = num_hidden_layers __snake_case : Any = num_attention_heads __snake_case : List[Any] = intermediate_size __snake_case : int = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : Optional[Any] = max_position_embeddings __snake_case : str = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : str = initializer_range __snake_case : Any = num_choices def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = None if self.use_attention_mask: __snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Optional[int] = None if self.use_token_type_ids: __snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Tuple = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : 
List[str] = config_and_inputs __snake_case : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self : Dict ) -> int: """simple docstring""" __snake_case : str = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : int = config_and_inputs __snake_case : str = True __snake_case : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class _A ( __lowercase , unittest.TestCase ): lowercase__: int = True lowercase__: List[str] = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Optional[int] = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase__ ( self : int ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: __snake_case : Optional[Any] = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__magic_name__ ) __snake_case : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class _A ( unittest.TestCase ): @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : List[str] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__magic_name__ ) __snake_case : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) __snake_case : List[Any] = model(__magic_name__ )[0] __snake_case : Optional[Any] = [1, 11, 5_02_65] self.assertEqual(list(output.shape ) , __magic_name__ ) # compare the actual values for a slice. __snake_case : List[Any] = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow def lowercase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __snake_case : Tuple = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__magic_name__ ) __snake_case : Union[str, Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) __snake_case : Dict = model(__magic_name__ )[0] # compare the actual values for a slice. __snake_case : Dict = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
def is_pentagonal(n: int) -> bool:
    # n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a whole number.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    # Project Euler 44: find pentagonal numbers whose sum and difference are pentagonal,
    # and return the (minimal found) difference.
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
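# Sanity check for is_pentagonal above: P(4) = 22 and P(5) = 35 are pentagonal
# (P(n) = n(3n - 1) / 2), while 23 is not.
assert is_pentagonal(22) and is_pentagonal(35)
assert not is_pentagonal(23)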
13
1
'''simple docstring''' import os import numpy import onnx def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Optional[int] = a.name __snake_case : Dict = b.name __snake_case : Optional[int] = """""" __snake_case : int = """""" __snake_case : Any = a == b __snake_case : List[Any] = name_a __snake_case : List[str] = name_b return res def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_lowerCamelCase , _lowerCamelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) _graph_replace_input_with(node_proto.attribute[1].g , _lowerCamelCase , _lowerCamelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : Dict = list(model.graph.initializer ) __snake_case : List[Any] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __snake_case : Tuple = inits[i].name __snake_case : Tuple = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : str = os.path.dirname(_lowerCamelCase ) __snake_case : Dict = os.path.basename(_lowerCamelCase ) __snake_case : Union[str, Any] = onnx.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) ) __snake_case : Dict = list(model.graph.initializer ) __snake_case : Optional[int] = set() __snake_case : Optional[Any] = {} __snake_case : Tuple = [] __snake_case : List[Any] = 0 for i in range(len(_lowerCamelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(_lowerCamelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_lowerCamelCase ) dup_set.add(_lowerCamelCase ) __snake_case : List[Any] = inits[j].data_type __snake_case : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , _lowerCamelCase ) total_reduced_size += mem_size __snake_case : Any = inits[i].name __snake_case : Any = inits[j].name if name_i in dup_map: dup_map[name_i].append(_lowerCamelCase ) else: __snake_case : Dict = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) __snake_case : int = sorted(_lowerCamelCase ) _remove_dup_initializers_from_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) __snake_case : str = """optimized_""" + model_file_name __snake_case : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase ) onnx.save(_lowerCamelCase , _lowerCamelCase ) return new_model
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)


@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call() accepts an extra, unused "foo" argument
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects keyed on Node.val, with an index map
    (idx_of_element) that enables O(log n) decrease_key updates."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
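# Note (added): build_heap sifts down from the last parent, which is O(n)
# overall, while insert, remove and decrease_key are O(log n); decrease_key
# relies on idx_of_element to locate a node's position without a linear scan.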
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, collecting completed boards in `boards`."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
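# Illustrative note (added): for n = 4 the search above finds exactly 2
# solutions, one of which is the board [1, 3, 0, 2] rendered as:
#
#     . Q . .
#     . . . Q
#     Q . . .
#     . . Q .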
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """Distributed retriever built on torch.distributed: only the main worker
    loads the index, and queries are gathered to / results scattered from it."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single-process training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version identifier in MAJOR.MINOR.PATCH format."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Render a version tuple as a string, e.g. (1, 2, 3) -> "1.2.3"."""
    return ".".join(str(v) for v in version_tuple)
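# Illustrative usage (added): _validate_operand plus @total_ordering lets a
# Version be compared against plain strings:
#
#     >>> Version("1.0.0") < "2.0.0"
#     True
#     >>> Version("1.0.0") == "1.0.0"
#     True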
"""
Project Euler Problem 23: https://projecteuler.net/problem=23

Find the sum of all positive integers which cannot be written as the sum of two
abundant numbers (all integers greater than 28123 can be written as such a sum).
"""


def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
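# Illustrative note (added): 12 is the smallest abundant number, since its
# proper divisors sum to 1 + 2 + 3 + 4 + 6 = 16 > 12, so 24 is the smallest
# number expressible as the sum of two abundant numbers; solution() sums every
# n that has no such decomposition.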
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad to a multiple of 3 bits, since each octal digit encodes 3 bits
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3] for index in range(len(bin_string)) if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
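# Illustrative doctest-style checks (added) for bin_to_octal:
#
#     >>> bin_to_octal("1111")
#     '17'
#     >>> bin_to_octal("101010101010011")
#     '52523'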
"""
Project Euler Problem 50: https://projecteuler.net/problem=50

Which prime, below one million, can be written as the sum of the most
consecutive primes?
"""

from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
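# Illustrative note (added, from the problem statement): below one hundred,
# 41 = 2 + 3 + 5 + 7 + 11 + 13 is the prime that is the sum of the most
# consecutive primes; solution() searches for the same property below `ceiling`.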
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
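# Illustrative usage (added): thanks to the _LazyModule indirection above,
# users import MT5 classes from the top level and the heavy modeling modules
# are only loaded on first attribute access:
#
#     from transformers import MT5Config, MT5ForConditionalGeneration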
13
1
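The mt5 __init__ above follows transformers' _LazyModule pattern: heavy submodules are only imported on first attribute access. The standard library supports the same idea; a minimal sketch using the importlib.util.LazyLoader recipe from the Python documentation (the helper name lazy_import is an illustration, not part of transformers):

import importlib.util
import sys


def lazy_import(name: str):
    """Return `name` as a module whose real import runs on first attribute access."""
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)  # execution is deferred until the module is used
    return module


json = lazy_import("json")       # nothing heavy has run yet
print(json.dumps({"ok": True}))  # first attribute access triggers the real import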
'''simple docstring''' from sklearn.metrics import recall_score import datasets __UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" __UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" __UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def lowercase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def lowercase__ ( self : List[str] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None , __magic_name__ : Dict=1 , __magic_name__ : Optional[Any]="binary" , __magic_name__ : List[Any]=None , __magic_name__ : List[str]="warn" , ) -> Optional[Any]: """simple docstring""" __snake_case : int = recall_score( __magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , ) return {"recall": float(__magic_name__ ) if score.size == 1 else score}
13
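A minimal usage sketch for the metric class above, taken from its own docstring examples (requires the `datasets` and `scikit-learn` packages):

import datasets

recall_metric = datasets.load_metric("recall")
results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
print(results)  # {'recall': 0.6666666666666666}

# Multiclass averaging, also from the docstring:
results = recall_metric.compute(
    predictions=[0, 2, 1, 0, 0, 1],
    references=[0, 1, 2, 0, 1, 2],
    average="macro",
)
print(results)  # {'recall': 0.3333333333333333}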
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __snake_case : Tuple = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""] __snake_case : Any = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. __snake_case : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
13
1
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" __snake_case : str = AutoConfig.from_pretrained(_lowerCamelCase ) __snake_case : Optional[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase ) __snake_case : Optional[int] = checkpoints.load_tax_checkpoint(_lowerCamelCase ) __snake_case : int = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": __snake_case : Optional[Any] = """SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": __snake_case : List[Any] = """LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : Tuple = """TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`""" """ attribute with a value from ['local', 'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): __snake_case : str = F'''layers_{str(_lowerCamelCase )}''' # Self-Attention __snake_case : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] __snake_case : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] __snake_case : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] __snake_case : int = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization __snake_case : int = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: __snake_case : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] __snake_case : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: __snake_case : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] __snake_case : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization __snake_case : List[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning __snake_case : int = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""] __snake_case : Union[str, Any] = tax_attention_key __snake_case : Dict = tax_attention_out __snake_case : Optional[Any] = tax_attention_query __snake_case : Optional[Any] = tax_attention_value __snake_case : Union[str, Any] = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : Dict = tax_global_layer_norm if split_mlp_wi: __snake_case : List[str] = tax_mlp_wi_a __snake_case : int = tax_mlp_wi_a else: __snake_case : Tuple = tax_mlp_wi __snake_case : Any = tax_mlp_wo __snake_case : Optional[int] = tax_mlp_layer_norm __snake_case : List[str] = flax_model_encoder_layer_block # Only 
for layer 0: __snake_case : Dict = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T __snake_case : Dict = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __snake_case : Optional[int] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T __snake_case : Dict = tax_encoder_global_rel_embedding # Assigning __snake_case : Dict = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] __snake_case : str = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): __snake_case : Dict = F'''layers_{str(_lowerCamelCase )}''' # Self-Attention __snake_case : Dict = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] __snake_case : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] __snake_case : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] __snake_case : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization __snake_case : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention __snake_case : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] __snake_case : Tuple = tax_enc_dec_attention_module["""key"""]["""kernel"""] __snake_case : Optional[int] = tax_enc_dec_attention_module["""out"""]["""kernel"""] __snake_case : Any = tax_enc_dec_attention_module["""query"""]["""kernel"""] __snake_case : Optional[int] = tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization __snake_case : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: __snake_case : str = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] __snake_case : str = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: __snake_case : int = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] __snake_case : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization __snake_case : Tuple = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning __snake_case : List[str] = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""] __snake_case : Optional[int] = tax_attention_key __snake_case : Union[str, Any] = tax_attention_out __snake_case : Any = tax_attention_query __snake_case : List[str] = tax_attention_value __snake_case : List[Any] = tax_pre_attention_layer_norm __snake_case : List[str] = tax_enc_dec_attention_key __snake_case : Union[str, Any] = tax_enc_dec_attention_out __snake_case : Dict = tax_enc_dec_attention_query __snake_case : int = tax_enc_dec_attention_value __snake_case : Optional[Any] = tax_cross_layer_norm if split_mlp_wi: __snake_case : int = tax_mlp_wi_a __snake_case : Any = tax_mlp_wi_a else: __snake_case : Any = tax_mlp_wi __snake_case : str = tax_mlp_wo __snake_case : Optional[Any] = txa_mlp_layer_norm __snake_case : int = flax_model_decoder_layer_block # Decoder Normalization __snake_case : Any = 
tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] __snake_case : Optional[Any] = txa_decoder_norm # Only for layer 0: __snake_case : Union[str, Any] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T __snake_case : Union[str, Any] = tax_decoder_rel_embedding # Token Embeddings __snake_case : Optional[Any] = tax_model["""target"""]["""token_embedder"""]["""embedding"""] __snake_case : Any = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: __snake_case : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] flax_model.save_pretrained(_lowerCamelCase ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) __UpperCamelCase = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
13
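As written, the attention-name branch in the converter above raises for a plain `model_type == "t5"` config: the first `if` assigns "SelfAttention", but control then falls into the separate if/elif/else chain and hits the ValueError. A self-contained sketch of the corrected, chained selection (the helper name pick_encoder_attention_name is hypothetical):

from typing import Optional


def pick_encoder_attention_name(model_type: str, encoder_attention_type: Optional[str] = None) -> str:
    """Chained branches so a plain T5 config no longer falls through to the error."""
    if model_type == "t5":
        return "SelfAttention"
    if model_type == "longt5" and encoder_attention_type == "local":
        return "LocalSelfAttention"
    if model_type == "longt5" and encoder_attention_type == "transient-global":
        return "TransientGlobalSelfAttention"
    raise ValueError(
        "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` "
        "with `encoder_attention_type` in ['local', 'transient-global']."
    )


assert pick_encoder_attention_name("t5") == "SelfAttention"
assert pick_encoder_attention_name("longt5", "local") == "LocalSelfAttention"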
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
13
1
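The per-element loop in prepare_config_and_inputs above enforces that each bounding box has x0 <= x1 and y0 <= y1. The same legality fix can be vectorized with NumPy; a small sketch (the function name normalize_bboxes is assumed for illustration):

import numpy as np


def normalize_bboxes(bbox: np.ndarray) -> np.ndarray:
    """Swap coordinates so every (x0, y0, x1, y1) box has non-negative extent."""
    bbox = bbox.copy()
    x0, y0, x1, y1 = bbox[..., 0], bbox[..., 1], bbox[..., 2], bbox[..., 3]
    bbox[..., 0], bbox[..., 2] = np.minimum(x0, x1), np.maximum(x0, x1)
    bbox[..., 1], bbox[..., 3] = np.minimum(y0, y1), np.maximum(y0, y1)
    return bbox


boxes = np.array([[[4, 7, 2, 3]]])  # x1 < x0 and y1 < y0
assert (normalize_bboxes(boxes) == np.array([[[2, 3, 4, 7]]])).all()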
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = IFPipeline lowercase__: Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} lowercase__: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__: List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowercase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return self._get_dummy_components() def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=0 ) -> Dict: """simple docstring""" if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : Optional[Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : int = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowercase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowercase__ ( self : List[str] ) -> int: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" self._test_save_load_local() def lowercase__ ( self : int ) -> Union[str, Any]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case : int = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) __snake_case : int = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=__magic_name__ , tokenizer=__magic_name__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) __snake_case , __snake_case : Tuple = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder 
gc.collect() __snake_case : Dict = None __snake_case : Dict = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img __snake_case : Optional[int] = IFImgaImgPipeline(**pipe_a.components ) __snake_case : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting __snake_case : str = IFInpaintingPipeline(**pipe_a.components ) __snake_case : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ) -> Optional[Any]: """simple docstring""" _start_torch_memory_measurement() __snake_case : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case : str = pipe_a( prompt_embeds=__magic_name__ , negative_prompt_embeds=__magic_name__ , num_inference_steps=2 , generator=__magic_name__ , output_type="""np""" , ) __snake_case : List[str] = output.images[0] assert image.shape == (64, 64, 3) __snake_case : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 __snake_case : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) # pipeline 2 _start_torch_memory_measurement() __snake_case : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__magic_name__ ) __snake_case : Optional[int] = pipe_a( prompt_embeds=__magic_name__ , negative_prompt_embeds=__magic_name__ , image=__magic_name__ , generator=__magic_name__ , num_inference_steps=2 , output_type="""np""" , ) __snake_case : int = output.images[0] assert image.shape == (2_56, 2_56, 3) __snake_case : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __snake_case : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) def lowercase__ ( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> int: """simple docstring""" _start_torch_memory_measurement() __snake_case : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__magic_name__ ) __snake_case : str = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case : str = pipe_a( prompt_embeds=__magic_name__ , negative_prompt_embeds=__magic_name__ , image=__magic_name__ , num_inference_steps=2 , generator=__magic_name__ , 
output_type="""np""" , ) __snake_case : str = output.images[0] assert image.shape == (64, 64, 3) __snake_case : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __snake_case : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) # pipeline 2 _start_torch_memory_measurement() __snake_case : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case : List[str] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__magic_name__ ) __snake_case : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__magic_name__ ) __snake_case : Optional[int] = pipe_a( prompt_embeds=__magic_name__ , negative_prompt_embeds=__magic_name__ , image=__magic_name__ , original_image=__magic_name__ , generator=__magic_name__ , num_inference_steps=2 , output_type="""np""" , ) __snake_case : Optional[Any] = output.images[0] assert image.shape == (2_56, 2_56, 3) __snake_case : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __snake_case : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Dict ) -> Optional[Any]: """simple docstring""" _start_torch_memory_measurement() __snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__magic_name__ ) __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__magic_name__ ) __snake_case : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case : Dict = pipe_a( prompt_embeds=__magic_name__ , negative_prompt_embeds=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , num_inference_steps=2 , generator=__magic_name__ , output_type="""np""" , ) __snake_case : int = output.images[0] assert image.shape == (64, 64, 3) __snake_case : int = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __snake_case : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) # pipeline 2 _start_torch_memory_measurement() __snake_case : Any = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__magic_name__ ) __snake_case : Optional[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__magic_name__ ) __snake_case : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__magic_name__ ) __snake_case : int = pipe_a( prompt_embeds=__magic_name__ , negative_prompt_embeds=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , original_image=__magic_name__ , generator=__magic_name__ , num_inference_steps=2 , output_type="""np""" , ) __snake_case : Tuple = output.images[0] assert image.shape == (2_56, 2_56, 3) __snake_case : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __snake_case : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) 
assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
13
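_start_torch_memory_measurement, defined at the end of the test file above, resets CUDA memory statistics so each pipeline's peak allocation can be asserted independently. A minimal standalone sketch of that pattern (requires a CUDA-enabled torch build to do anything):

import torch


def start_memory_measurement() -> None:
    """Reset CUDA memory statistics so the next peak reading covers only new work."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


if torch.cuda.is_available():
    start_memory_measurement()
    x = torch.randn(1024, 1024, device="cuda")
    print(f"peak allocation: {torch.cuda.max_memory_allocated() / 1e6:.1f} MB")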
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
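# A back-of-the-envelope sketch of the attention-shape arithmetic asserted in
# test_attention_outputs above, using made-up numbers (the real values come from
# TimesformerModelTester). Per the shape comment in that test, each attention map
# covers one frame's patches plus a CLS token, giving seq_length // num_frames + 1
# tokens per axis, stacked over batch_size * num_frames frame slices.
def expected_attention_shape(batch_size, num_frames, num_heads, seq_length):
    patches_per_frame = seq_length // num_frames  # spatial tokens in a single frame
    tokens_per_axis = patches_per_frame + 1  # + 1 for the CLS token
    return (batch_size * num_frames, num_heads, tokens_per_axis, tokens_per_axis)


# e.g. 2 videos of 2 frames, 4 heads, 16 spatio-temporal tokens -> (4, 4, 9, 9)
assert expected_attention_shape(2, 2, 4, 16) == (4, 4, 9, 9)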
'''simple docstring'''

import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
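# A standalone sketch of the behaviour pinned down above (run it directly, not under
# pytest): get_imports() lists a module file's top-level dependencies but skips any
# import wrapped in try/except, which custom modules use to guard optional extras.
# "not_installed_pkg" is a made-up module name used purely for illustration.
if __name__ == "__main__":
    import tempfile

    demo_source = "import os\n\ntry:\n    import not_installed_pkg\nexcept ImportError:\n    pass\n"
    demo_path = os.path.join(tempfile.mkdtemp(), "demo_module.py")
    with open(demo_path, "w") as demo_file:
        demo_file.write(demo_source)

    # The guarded import should be filtered out, leaving only "os".
    print(get_imports(demo_path))  # -> ['os']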
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
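# A minimal sketch of the lazy-import machinery this __init__ leans on (simplified;
# the real transformers._LazyModule also wires up submodule access and import-error
# reporting). The idea: subclass ModuleType and resolve exported names on first
# attribute access, so torch/vision code is only imported when actually used.
import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        if name not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._attr_to_module[name], self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache, so later lookups bypass __getattr__
        return value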