Dataset schema (one record per row below):
  code                     string   (lengths 86 – 54.5k)
  code_codestyle           int64    (range 0 – 371)
  style_context            string   (lengths 87 – 49.2k)
  style_context_codestyle  int64    (range 0 – 349)
  label                    int64    (range 0 – 1)
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # Position ids must skip the padding index: the first usable position is
        # padding_idx + 1, and padded tokens keep padding_idx as their position.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
code_codestyle: 352
"""simple docstring""" import os import string import sys lowercase__ = 1 << 8 lowercase__ = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } lowercase__ = KEYMAP["""up"""] lowercase__ = KEYMAP["""left"""] if sys.platform == "win32": lowercase__ = [] lowercase__ = { B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): lowercase__ = ord(str(i)) def _snake_case ( ): if os.name == "nt": import msvcrt _lowerCamelCase : Any = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowercase__ ) == 0: # Read the keystroke _lowerCamelCase : str = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCamelCase : List[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(lowercase__ ) if ord(lowercase__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _lowerCamelCase : List[Any] = chr(KEYMAP['esc'] ) except KeyError: _lowerCamelCase : int = cha[1] else: _lowerCamelCase : Optional[int] = ch.decode(lowercase__ ) else: _lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty _lowerCamelCase : List[str] = sys.stdin.fileno() _lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ ) try: tty.setraw(lowercase__ ) _lowerCamelCase : Optional[Any] = sys.stdin.read(1 ) finally: termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ ) return ch def _snake_case ( ): _lowerCamelCase : int = get_raw_chars() if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowercase__ ) == KEYMAP["esc"]: _lowerCamelCase : Union[str, Any] = get_raw_chars() if ord(lowercase__ ) == KEYMAP["mod_int"]: _lowerCamelCase : List[Any] = get_raw_chars() if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowercase__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
style_context_codestyle: 12
label: 0
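A minimal read-loop sketch for the key helpers above. The `keymap` module name and the loop itself are assumptions for illustration, not part of the source:

# Hypothetical usage of get_character(); assumes the file above is importable as `keymap`.
from keymap import KEYMAP, get_character

while True:
    key = get_character()
    if key == chr(KEYMAP["newline"]):  # Enter ends the loop
        break
    elif isinstance(key, str) and ord(key) == KEYMAP["up"]:
        print("arrow up")
    else:
        print(f"key: {key!r}")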
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase__ = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 353
"""simple docstring""" from typing import Any def _snake_case ( lowercase__ ): if not input_list: return [] _lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list] _lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 12
label: 0
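A short usage sketch for the `mode` function above (the example values are illustrative):

# All values sharing the maximal count are returned, sorted.
print(mode([2, 3, 4, 2, 2]))  # [2]
print(mode([2, 2, 3, 3]))     # [2, 3] -- a tie yields every mode
print(mode([]))               # []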
"""simple docstring""" import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = DebertaTokenizer lowerCamelCase__ = True lowerCamelCase__ = DebertaTokenizerFast def A_ ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _lowerCamelCase : str = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '[UNK]', ] _lowerCamelCase : Dict = dict(zip(lowercase , range(len(lowercase ) ) ) ) _lowerCamelCase : Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _lowerCamelCase : Any = {'unk_token': '[UNK]'} _lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _lowerCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowercase ) ) def A_ ( self , **lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def A_ ( self , lowercase ): _lowerCamelCase : List[str] = 'lower newer' _lowerCamelCase : List[Any] = 'lower newer' return input_text, output_text def A_ ( self ): _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = 'lower newer' _lowerCamelCase : str = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] _lowerCamelCase : List[Any] = tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token] _lowerCamelCase : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.get_tokenizer() _lowerCamelCase : List[str] = tokenizer('Hello' , 'World' ) _lowerCamelCase : Optional[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['token_type_ids'] , lowercase ) @slow def A_ ( self ): _lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained('microsoft/deberta-base' ) _lowerCamelCase : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase ) _lowerCamelCase : str = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tokenizer.encode( 'sequence builders' , add_special_tokens=lowercase , add_prefix_space=lowercase ) _lowerCamelCase : List[str] = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=lowercase , add_prefix_space=lowercase ) _lowerCamelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowercase ) _lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def A_ ( self ): _lowerCamelCase : List[Any] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: _lowerCamelCase : Union[str, Any] = 
tokenizer_class.from_pretrained('microsoft/deberta-base' ) _lowerCamelCase : int = [ 'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] _lowerCamelCase : Tuple = tokenizer(lowercase , padding=lowercase ) _lowerCamelCase : List[Any] = [tokenizer.decode(lowercase , skip_special_tokens=lowercase ) for seq in encoding['input_ids']] # fmt: off _lowerCamelCase : List[str] = { 'input_ids': [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], 'token_type_ids': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on _lowerCamelCase : Any = [ 'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] self.assertDictEqual(encoding.data , lowercase ) for expected, decoded in zip(lowercase , lowercase ): self.assertEqual(lowercase , lowercase )
code_codestyle: 354
"""simple docstring""" def _snake_case ( lowercase__ ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection _lowerCamelCase : List[str] = len(lowercase__ ) _lowerCamelCase : List[str] = max(lowercase__ ) _lowerCamelCase : List[str] = min(lowercase__ ) # create the counting array _lowerCamelCase : List[Any] = coll_max + 1 - coll_min _lowerCamelCase : List[Any] = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowercase__ ): _lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1] # create the output collection _lowerCamelCase : Dict = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowercase__ ) ): _lowerCamelCase : Any = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _snake_case ( lowercase__ ): return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt" lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(counting_sort(unsorted))
style_context_codestyle: 12
label: 0
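A quick usage sketch for the counting-sort helpers above (input values are illustrative):

print(counting_sort([0, 5, 3, 2, 2]))           # [0, 2, 2, 3, 5]
print(counting_sort([-2, -5, -45]))             # [-45, -5, -2] -- negatives work via the coll_min offset
print(counting_sort_string("thisisthestring"))  # 'eghhiiinrsssttt'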
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowercase__ = logging.get_logger(__name__) @add_end_docstrings(lowercase ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , *lowercase , **lowercase ): super().__init__(*lowercase , **lowercase ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def A_ ( self , lowercase=None ): _lowerCamelCase : Optional[int] = {} if top_k is not None: _lowerCamelCase : Union[str, Any] = top_k return {}, {}, postprocess_params def __call__( self , lowercase , **lowercase ): return super().__call__(lowercase , **lowercase ) def A_ ( self , lowercase ): _lowerCamelCase : Optional[int] = load_image(lowercase ) _lowerCamelCase : int = self.image_processor(images=lowercase , return_tensors=self.framework ) return model_inputs def A_ ( self , lowercase ): _lowerCamelCase : str = self.model(**lowercase ) return model_outputs def A_ ( self , lowercase , lowercase=5 ): if top_k > self.model.config.num_labels: _lowerCamelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": _lowerCamelCase : Any = model_outputs.logits.softmax(-1 )[0] _lowerCamelCase : int = probs.topk(lowercase ) elif self.framework == "tf": _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1 )[0] _lowerCamelCase : int = tf.math.top_k(lowercase , k=lowercase ) _lowerCamelCase : Union[str, Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowerCamelCase : Any = scores.tolist() _lowerCamelCase : Optional[int] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
code_codestyle: 355
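A hedged usage sketch for the `ImageClassificationPipeline` above; the checkpoint id and image URL are illustrative stand-ins, not mandated by the source:

from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")  # example checkpoint
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.4f}")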
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) lowercase__ = parser.parse_args() lowercase__ = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
style_context_codestyle: 12
label: 0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType lowercase__ = logging.get_logger(__name__) lowercase__ = { """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """deberta-v2""" def __init__( self , lowercase=128100 , lowercase=1536 , lowercase=24 , lowercase=24 , lowercase=6144 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=0 , lowercase=0.02 , lowercase=1E-7 , lowercase=False , lowercase=-1 , lowercase=0 , lowercase=True , lowercase=None , lowercase=0 , lowercase="gelu" , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : int = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : Tuple = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = type_vocab_size _lowerCamelCase : Dict = initializer_range _lowerCamelCase : int = relative_attention _lowerCamelCase : int = max_relative_positions _lowerCamelCase : int = pad_token_id _lowerCamelCase : int = position_biased_input # Backwards compatibility if type(lowercase ) == str: _lowerCamelCase : Optional[int] = [x.strip() for x in pos_att_type.lower().split('|' )] _lowerCamelCase : List[str] = pos_att_type _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : Any = kwargs.get('pooler_hidden_size' , lowercase ) _lowerCamelCase : Optional[Any] = pooler_dropout _lowerCamelCase : Dict = pooler_hidden_act class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @property def A_ ( self ): if self.task == "multiple-choice": _lowerCamelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCamelCase : str = {0: 'batch', 1: 'sequence'} if self._config.type_vocab_size > 0: return OrderedDict( [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] ) else: return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] ) @property def A_ ( self ): return 12 def A_ ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 3 , lowercase = 40 , lowercase = 40 , lowercase = None , ): _lowerCamelCase : Optional[Any] = super().generate_dummy_inputs(preprocessor=lowercase , framework=lowercase ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
code_codestyle: 356
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = (UnCLIPScheduler,) def A_ ( self , **lowercase ): _lowerCamelCase : Any = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**lowercase ) return config def A_ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def A_ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=lowercase ) def A_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase ) def A_ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=lowercase ) def A_ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=lowercase ) def A_ ( self ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=lowercase , prev_timestep=lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' ) _lowerCamelCase : str = scheduler_class(**lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5 def A_ ( self ): _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' ) _lowerCamelCase : int = scheduler_class(**lowercase ) _lowerCamelCase : List[str] = 0.5 assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5 def A_ ( self ): _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config() _lowerCamelCase : Tuple = scheduler_class(**lowercase ) _lowerCamelCase : Union[str, Any] = scheduler.timesteps _lowerCamelCase : Any = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter _lowerCamelCase : Optional[int] = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : Tuple = model(lowercase , lowercase ) # 2. 
predict previous mean of sample x_t-1 _lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample _lowerCamelCase : Optional[int] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2 assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3 def A_ ( self ): _lowerCamelCase : Tuple = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Optional[Any] = scheduler_class(**lowercase ) scheduler.set_timesteps(25 ) _lowerCamelCase : Optional[Any] = scheduler.timesteps _lowerCamelCase : Optional[int] = self.dummy_model() _lowerCamelCase : Any = self.dummy_sample_deter _lowerCamelCase : str = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : List[Any] = model(lowercase , lowercase ) if i + 1 == timesteps.shape[0]: _lowerCamelCase : Optional[int] = None else: _lowerCamelCase : List[str] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _lowerCamelCase : Union[str, Any] = scheduler.step( lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample _lowerCamelCase : List[Any] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2 assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3 def A_ ( self ): pass def A_ ( self ): pass
style_context_codestyle: 12
label: 0
"""simple docstring""" import itertools import os import re lowercase__ = re.compile(R"""([A-Z]+)([A-Z][a-z])""") lowercase__ = re.compile(R"""([a-z\d])([A-Z])""") lowercase__ = re.compile(R"""(?<!_)_(?!_)""") lowercase__ = re.compile(R"""(_{2,})""") lowercase__ = R"""^\w+(\.\w+)*$""" lowercase__ = R"""<>:/\|?*""" def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[Any] = _uppercase_uppercase_re.sub(r'\1_\2' , lowercase__ ) _lowerCamelCase : Dict = _lowercase_uppercase_re.sub(r'\1_\2' , lowercase__ ) return name.lower() def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = _single_underscore_re.split(lowercase__ ) _lowerCamelCase : Any = [_multiple_underscores_re.split(lowercase__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowercase__ ) if n != '' ) def _snake_case ( lowercase__ ): if os.path.basename(lowercase__ ) != name: raise ValueError(f'''Should be a dataset name, not a path: {name}''' ) return camelcase_to_snakecase(lowercase__ ) def _snake_case ( lowercase__ , lowercase__ ): if os.path.basename(lowercase__ ) != name: raise ValueError(f'''Should be a dataset name, not a path: {name}''' ) if not re.match(_split_re , lowercase__ ): raise ValueError(f'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' ) return f'''{filename_prefix_for_name(lowercase__ )}-{split}''' def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None ): _lowerCamelCase : str = filename_prefix_for_split(lowercase__ , lowercase__ ) if filetype_suffix: prefix += f'''.{filetype_suffix}''' _lowerCamelCase : str = os.path.join(lowercase__ , lowercase__ ) return f'''{filepath}*''' def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): _lowerCamelCase : List[Any] = filename_prefix_for_split(lowercase__ , lowercase__ ) _lowerCamelCase : Union[str, Any] = os.path.join(lowercase__ , lowercase__ ) if shard_lengths: _lowerCamelCase : int = len(lowercase__ ) _lowerCamelCase : Optional[int] = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowercase__ )] if filetype_suffix: _lowerCamelCase : str = [filename + f'''.{filetype_suffix}''' for filename in filenames] return filenames else: _lowerCamelCase : Dict = prefix if filetype_suffix: filename += f'''.{filetype_suffix}''' return [filename]
code_codestyle: 357
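To make the naming helpers above concrete, a small sketch (the dataset name and paths are made up):

print(camelcase_to_snakecase("SquadV2Dataset"))              # squad_v2_dataset
print(filename_prefix_for_split("SquadV2Dataset", "train"))  # squad_v2_dataset-train
print(
    filenames_for_dataset_split(
        "/data", "SquadV2Dataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
    )
)
# ['/data/squad_v2_dataset-train-00000-of-00002.arrow',
#  '/data/squad_v2_dataset-train-00001-of-00002.arrow']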
"""simple docstring""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """data2vec-audio""" def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) _lowerCamelCase : str = hidden_size _lowerCamelCase : str = feat_extract_activation _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Optional[Any] = conv_bias _lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings _lowerCamelCase : List[Any] = num_conv_pos_embedding_groups _lowerCamelCase : List[Any] = conv_pos_kernel_size _lowerCamelCase : Optional[int] = len(self.conv_dim ) _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : Any = intermediate_size _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Any = hidden_dropout _lowerCamelCase : Union[str, Any] = attention_dropout _lowerCamelCase : str = activation_dropout _lowerCamelCase : Any = feat_proj_dropout _lowerCamelCase : Tuple = final_dropout _lowerCamelCase : Union[str, Any] = layerdrop _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Optional[Any] = mask_time_prob _lowerCamelCase : List[Any] = mask_time_length _lowerCamelCase : List[Any] = mask_time_min_masks _lowerCamelCase : Tuple = mask_feature_prob _lowerCamelCase : Optional[Any] = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # ctc loss _lowerCamelCase : Tuple = ctc_loss_reduction _lowerCamelCase : str = ctc_zero_infinity # adapter _lowerCamelCase : Union[str, Any] = add_adapter _lowerCamelCase : List[Any] = adapter_kernel_size _lowerCamelCase : Optional[Any] = adapter_stride _lowerCamelCase : List[Any] = num_adapter_layers _lowerCamelCase : int = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCamelCase : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCamelCase : List[str] = list(lowercase ) _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Any = list(lowercase ) _lowerCamelCase : Optional[Any] = xvector_output_dim @property def A_ ( self ): return math.prod(self.conv_stride )
style_context_codestyle: 12
label: 0
"""simple docstring""" lowercase__ = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] lowercase__ = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] lowercase__ = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] lowercase__ = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] lowercase__ = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] lowercase__ = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] lowercase__ = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] lowercase__ = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
code_codestyle: 358
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool lowercase__ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", 
"""Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": 
"""urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """facebook/nllb-200-distilled-600M""" lowerCamelCase__ = ( """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`.""" ) lowerCamelCase__ = """translator""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = LANGUAGE_CODES lowerCamelCase__ = ["""text""", """text""", """text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase , lowercase , lowercase ): if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) _lowerCamelCase : str = self.lang_to_code[src_lang] _lowerCamelCase : int = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase ) def A_ ( self , lowercase ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase )
12
0
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowercase__ = 50_0000 lowercase__ , lowercase__ = os.path.split(__file__) lowercase__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def _snake_case ( lowercase__ , **lowercase__ ): _lowerCamelCase : Union[str, Any] = dataset.map(**lowercase__ ) @get_duration def _snake_case ( lowercase__ , **lowercase__ ): _lowerCamelCase : int = dataset.filter(**lowercase__ ) def _snake_case ( ): _lowerCamelCase : int = {'num examples': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: _lowerCamelCase : Tuple = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} ) _lowerCamelCase : Tuple = generate_example_dataset( os.path.join(lowercase__ , 'dataset.arrow' ) , lowercase__ , num_examples=lowercase__ ) _lowerCamelCase : Optional[int] = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase__ ) def tokenize(lowercase__ ): return tokenizer(examples['text'] ) _lowerCamelCase : Dict = map(lowercase__ ) _lowerCamelCase : Union[str, Any] = map(lowercase__ , batched=lowercase__ ) _lowerCamelCase : Dict = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='numpy' ): _lowerCamelCase : Optional[Any] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='pandas' ): _lowerCamelCase : str = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='torch' , columns='numbers' ): _lowerCamelCase : Tuple = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='tensorflow' , columns='numbers' ): _lowerCamelCase : Optional[int] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) _lowerCamelCase : Tuple = map(lowercase__ , function=lowercase__ , batched=lowercase__ ) _lowerCamelCase : Optional[Any] = filter(lowercase__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase__ , 'wb' ) as f: f.write(json.dumps(lowercase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
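# The benchmark above imports `get_duration` from a local `utils` module that is
# not shown here; a plausible minimal implementation (an assumption, not the
# actual helper) would time the wrapped call and return the elapsed seconds.
import time
from functools import wraps

def get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed wall-clock seconds
    return wrapper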
359
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 ) _lowerCamelCase : List[str] = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def A_ ( self , lowercase , lowercase ): for example in examples: _lowerCamelCase : Tuple = video_classifier(lowercase ) self.assertEqual( lowercase , [ {'score': ANY(lowercase ), 'label': ANY(lowercase )}, {'score': ANY(lowercase ), 'label': ANY(lowercase )}, ] , ) @require_torch def A_ ( self ): _lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' _lowerCamelCase : Tuple = VideoMAEFeatureExtractor( size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} ) _lowerCamelCase : Dict = pipeline( 'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 ) _lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , ) _lowerCamelCase : str = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], ] , ) @require_tf def A_ ( self ): pass
12
0
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): # Initialise PyTorch model _lowerCamelCase : Any = FunnelConfig.from_json_file(lowercase__ ) print(f'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : Optional[int] = FunnelBaseModel(lowercase__ ) if base_model else FunnelModel(lowercase__ ) # Load weights from tf checkpoint load_tf_weights_in_funnel(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not.""" ) lowercase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
360
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase__ = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
12
0
"""simple docstring""" import numpy # List of input, output pairs lowercase__ = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) lowercase__ = (((515, 22, 13), 555), ((61, 35, 49), 150)) lowercase__ = [2, 4, 1, 5] lowercase__ = len(train_data) lowercase__ = 0.009 def _snake_case ( lowercase__ , lowercase__="train" ): return calculate_hypothesis_value(lowercase__ , lowercase__ ) - output( lowercase__ , lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : int = 0 for i in range(len(lowercase__ ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _snake_case ( lowercase__ , lowercase__ ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _snake_case ( lowercase__ , lowercase__ ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def _snake_case ( lowercase__ , lowercase__=m ): _lowerCamelCase : Optional[int] = 0 for i in range(lowercase__ ): if index == -1: summation_value += _error(lowercase__ ) else: summation_value += _error(lowercase__ ) * train_data[i][0][index] return summation_value def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = summation_of_cost_derivative(lowercase__ , lowercase__ ) / m return cost_derivative_value def _snake_case ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output _lowerCamelCase : Tuple = 0.0_0_0_0_0_2 _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Optional[int] = 0 while True: j += 1 _lowerCamelCase : List[Any] = [0, 0, 0, 0] for i in range(0 , len(lowercase__ ) ): _lowerCamelCase : List[Any] = get_cost_derivative(i - 1 ) _lowerCamelCase : List[str] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( lowercase__ , lowercase__ , atol=lowercase__ , rtol=lowercase__ , ): break _lowerCamelCase : List[Any] = temp_parameter_vector print(('Number of iterations:', j) ) def _snake_case ( ): for i in range(len(lowercase__ ) ): print(('Actual output value:', output(lowercase__ , 'test' )) ) print(('Hypothesis output:', calculate_hypothesis_value(lowercase__ , 'test' )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
361
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): if attention_mask is None: _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = OPTConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : Tuple = pad_token_id _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : Optional[int] = embed_dim _lowerCamelCase : List[str] = word_embed_proj_dim _lowerCamelCase : Any = False def A_ ( self ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) _lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase ) _lowerCamelCase : Optional[Any] = inputs_dict['input_ids'] _lowerCamelCase : str = input_ids[:1, :] _lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) _lowerCamelCase, _lowerCamelCase : List[str] 
= outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 10 def A_ ( self ): _lowerCamelCase : int = TFOPTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _lowerCamelCase : Optional[int] = model_class(config=lowercase ) _lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) _lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
_lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing _lowerCamelCase : int = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Optional[Any] = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) _lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Union[str, Any] = False self.assertTrue(lowercase ) def _snake_case ( lowercase__ ): return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = 99 def A_ ( self ): _lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _lowerCamelCase : int = input_ids.shape[0] _lowerCamelCase : List[Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' ) _lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): _lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state _lowerCamelCase : Optional[Any] = (1, 11, 512) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[str] = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) _lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): super().setUp() _lowerCamelCase : List[Any] = 'facebook/opt-350m' def A_ ( self ): _lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) _lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) _lowerCamelCase : List[str] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) _lowerCamelCase : Any = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, 
-10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) _lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self ): _lowerCamelCase : str = 'facebook/opt-125m' _lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : int = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = 'facebook/opt-350m' _lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase ) _lowerCamelCase : Any = 'left' # use different length sentences to test batching _lowerCamelCase : Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] _lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) _lowerCamelCase : int = inputs['input_ids'] _lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase ) _lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) _lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) _lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def A_ ( self ): _lowerCamelCase : Tuple = 'facebook/opt-350m' _lowerCamelCase : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the 
city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
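# A short generation sketch mirroring the integration tests above (same
# checkpoint family and GPT-2 tokenizer pairing; no output text is asserted here).
from transformers import GPT2Tokenizer, TFOPTForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
input_ids = tokenizer("Today is a beautiful day and", return_tensors="tf").input_ids
output_ids = model.generate(input_ids, max_length=10)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])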
12
0
"""simple docstring""" from torch import nn def _snake_case ( lowercase__ ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f'''Unsupported activation function: {act_fn}''' )
362
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """philschmid/bart-large-cnn-samsum""" lowerCamelCase__ = ( """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """ """and returns a summary of the text.""" ) lowerCamelCase__ = """summarizer""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = ["""text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase ): return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase )[0] def A_ ( self , lowercase ): return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
12
0
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = RoFormerTokenizer lowerCamelCase__ = RoFormerTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def A_ ( self ): super().setUp() def A_ ( self , **lowercase ): return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **lowercase ) def A_ ( self , **lowercase ): return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **lowercase ) def A_ ( self ): _lowerCamelCase : str = '永和服装饰品有限公司,今天天气非常好' _lowerCamelCase : Optional[int] = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好' return input_text, output_text def A_ ( self ): _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : List[str] = self.get_chinese_input_output_texts() _lowerCamelCase : List[Any] = tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , output_text.split() ) _lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token] _lowerCamelCase : Optional[Any] = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def A_ ( self ): _lowerCamelCase : int = self.get_rust_tokenizer() _lowerCamelCase : int = self.get_chinese_input_output_texts() _lowerCamelCase : int = tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , output_text.split() ) _lowerCamelCase : List[Any] = tokens + [tokenizer.unk_token] _lowerCamelCase : Tuple = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def A_ ( self ): pass def A_ ( self ): pass def A_ ( self ): pass
363
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) ) _lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : int = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" lowercase__ = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
364
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowercase__ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) lowercase__ = [] lowercase__ = [] lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowercase__ = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", """emoji""": True, }, } ] lowercase__ = 0 for log in Path().glob("""*.log"""): lowercase__ = 0 with open(log, """r""") as f: for line in f: lowercase__ = json.loads(line) if line.get("""nodeid""", """""") != "": lowercase__ = line["""nodeid"""] if line.get("""duration""", None) is not None: lowercase__ = F"{line['duration']:.4f}" if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowercase__ = [] log.unlink() lowercase__ = """""" lowercase__ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowercase__ = [] lowercase__ = {} for test in failed_tests: lowercase__ = test[0].split("""::""") lowercase__ = data[0].split("""/""")[-1] if data[0] not in filesafailed: lowercase__ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowercase__ = [test[0] for test in failed_table] lowercase__ = list(set(files)) # Count number of instances in failed_tests lowercase__ = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowercase__ = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: lowercase__ = """Too many failed tests, please see the full report in the Action results.""" lowercase__ = len(err) + 10 lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}" print(F"### {message}") else: lowercase__ = """No failed tests! 🤗""" print(F"## {message}") payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 
🤗": lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) lowercase__ = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) lowercase__ = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowercase__ = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowercase__ = row[0] else: lowercase__ = """""" lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
12
0
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase , ): super().__init__() _lowerCamelCase : Optional[int] = value_function _lowerCamelCase : Optional[int] = unet _lowerCamelCase : Any = scheduler _lowerCamelCase : Dict = env _lowerCamelCase : Any = env.get_dataset() _lowerCamelCase : Any = {} for key in self.data.keys(): try: _lowerCamelCase : Union[str, Any] = self.data[key].mean() except: # noqa: E722 pass _lowerCamelCase : Optional[int] = {} for key in self.data.keys(): try: _lowerCamelCase : Optional[Any] = self.data[key].std() except: # noqa: E722 pass _lowerCamelCase : int = env.observation_space.shape[0] _lowerCamelCase : Union[str, Any] = env.action_space.shape[0] def A_ ( self , lowercase , lowercase ): return (x_in - self.means[key]) / self.stds[key] def A_ ( self , lowercase , lowercase ): return x_in * self.stds[key] + self.means[key] def A_ ( self , lowercase ): if type(lowercase ) is dict: return {k: self.to_torch(lowercase ) for k, v in x_in.items()} elif torch.is_tensor(lowercase ): return x_in.to(self.unet.device ) return torch.tensor(lowercase , device=self.unet.device ) def A_ ( self , lowercase , lowercase , lowercase ): for key, val in cond.items(): _lowerCamelCase : Union[str, Any] = val.clone() return x_in def A_ ( self , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Union[str, Any] = x.shape[0] _lowerCamelCase : Union[str, Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model _lowerCamelCase : Optional[int] = torch.full((batch_size,) , lowercase , device=self.unet.device , dtype=torch.long ) for _ in range(lowercase ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models _lowerCamelCase : Optional[Any] = self.value_function(x.permute(0 , 2 , 1 ) , lowercase ).sample _lowerCamelCase : int = torch.autograd.grad([y.sum()] , [x] )[0] _lowerCamelCase : List[Any] = self.scheduler._get_variance(lowercase ) _lowerCamelCase : Union[str, Any] = torch.exp(0.5 * posterior_variance ) _lowerCamelCase : Optional[int] = model_std * grad _lowerCamelCase : List[Any] = 0 _lowerCamelCase : int = x.detach() _lowerCamelCase : Union[str, Any] = x + scale * grad _lowerCamelCase : Union[str, Any] = self.reset_xa(lowercase , lowercase , self.action_dim ) _lowerCamelCase : Union[str, Any] = self.unet(x.permute(0 , 2 , 1 ) , lowercase ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg _lowerCamelCase : Dict = self.scheduler.step(lowercase , lowercase , lowercase , predict_epsilon=lowercase )['prev_sample'] # apply conditions to the trajectory (set the initial state) _lowerCamelCase : List[str] = self.reset_xa(lowercase , lowercase , self.action_dim ) _lowerCamelCase : int = self.to_torch(lowercase ) return x, y def __call__( self , lowercase , lowercase=64 , lowercase=32 , lowercase=2 , lowercase=0.1 ): # normalize the observations and create batch dimension _lowerCamelCase : Optional[int] = self.normalize(lowercase , 'observations' ) _lowerCamelCase : int = obs[None].repeat(lowercase , axis=0 ) _lowerCamelCase : Any = {0: self.to_torch(lowercase )} _lowerCamelCase : str = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate 
initial noise and apply our conditions (to make the trajectories start at current state) _lowerCamelCase : Tuple = randn_tensor(lowercase , device=self.unet.device ) _lowerCamelCase : Optional[Any] = self.reset_xa(lowercase , lowercase , self.action_dim ) _lowerCamelCase : int = self.to_torch(lowercase ) # run the diffusion process _lowerCamelCase : List[Any] = self.run_diffusion(lowercase , lowercase , lowercase , lowercase ) # sort output trajectories by value _lowerCamelCase : List[Any] = y.argsort(0 , descending=lowercase ).squeeze() _lowerCamelCase : List[str] = x[sorted_idx] _lowerCamelCase : Any = sorted_values[:, :, : self.action_dim] _lowerCamelCase : Union[str, Any] = actions.detach().cpu().numpy() _lowerCamelCase : Dict = self.de_normalize(lowercase , key='actions' ) # select the action with the highest value if y is not None: _lowerCamelCase : List[Any] = 0 else: # if we didn't run value guiding, select a random action _lowerCamelCase : Union[str, Any] = np.random.randint(0 , lowercase ) _lowerCamelCase : Union[str, Any] = denorm_actions[selected_index, 0] return denorm_actions
365
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """AutoTokenizer""" lowerCamelCase__ = ["""tokenizer"""] lowerCamelCase__ = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , lowercase , lowercase=None ): super().__init__(lowercase ) _lowerCamelCase : Optional[int] = speaker_embeddings @classmethod def A_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: _lowerCamelCase : Optional[Any] = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(lowercase , lowercase )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) _lowerCamelCase : List[Any] = None else: with open(lowercase ) as speaker_embeddings_json: _lowerCamelCase : Union[str, Any] = json.load(lowercase ) else: _lowerCamelCase : Tuple = None _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def A_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase ) _lowerCamelCase : int = {} _lowerCamelCase : List[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase ) _lowerCamelCase : Any = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , ) _lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' ) _lowerCamelCase : Optional[Any] = tmp_dict with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def A_ ( self , lowercase = None , **lowercase ): _lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset] _lowerCamelCase : Any = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) _lowerCamelCase : Union[str, Any] = get_file_from_repo( self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase ) , 
cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if path is None: raise ValueError( F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) _lowerCamelCase : List[str] = np.load(lowercase ) return voice_preset_dict def A_ ( self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _lowerCamelCase : Any = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith('.npz' ): _lowerCamelCase : Optional[Any] = voice_preset + '.npz' _lowerCamelCase : Union[str, Any] = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) _lowerCamelCase : Tuple = BatchFeature(data=lowercase , tensor_type=lowercase ) _lowerCamelCase : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: _lowerCamelCase : Optional[int] = voice_preset return encoded_text
12
0
"""simple docstring""" import os def _snake_case ( ): _lowerCamelCase : Tuple = os.path.dirname(os.path.realpath(lowercase__ ) ) _lowerCamelCase : Any = os.path.join(lowercase__ , 'triangle.txt' ) with open(lowercase__ ) as f: _lowerCamelCase : List[str] = f.readlines() _lowerCamelCase : List[Any] = [] for line in triangle: _lowerCamelCase : str = [] for number in line.strip().split(' ' ): numbers_from_line.append(int(lowercase__ ) ) a.append(lowercase__ ) for i in range(1 , len(lowercase__ ) ): for j in range(len(a[i] ) ): _lowerCamelCase : List[str] = a[i - 1][j] if j != len(a[i - 1] ) else 0 _lowerCamelCase : Any = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(lowercase__ , lowercase__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
366
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device lowercase__ = False class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCamelCase : Dict = torch.manual_seed(0 ) _lowerCamelCase : Dict = pipe( image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _lowerCamelCase : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
12
0
"""simple docstring""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( ): _lowerCamelCase : str = input('Enter message: ' ) _lowerCamelCase : Optional[int] = input('Enter key [alphanumeric]: ' ) _lowerCamelCase : Dict = input('Encrypt/Decrypt [e/d]: ' ) if mode.lower().startswith('e' ): _lowerCamelCase : Union[str, Any] = 'encrypt' _lowerCamelCase : List[Any] = encrypt_message(lowercase__ , lowercase__ ) elif mode.lower().startswith('d' ): _lowerCamelCase : List[str] = 'decrypt' _lowerCamelCase : Any = decrypt_message(lowercase__ , lowercase__ ) print(f'''\n{mode.title()}ed message:''' ) print(lowercase__ ) def _snake_case ( lowercase__ , lowercase__ ): return translate_message(lowercase__ , lowercase__ , 'encrypt' ) def _snake_case ( lowercase__ , lowercase__ ): return translate_message(lowercase__ , lowercase__ , 'decrypt' ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : str = [] _lowerCamelCase : List[Any] = 0 _lowerCamelCase : List[Any] = key.upper() for symbol in message: _lowerCamelCase : Optional[Any] = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(lowercase__ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(lowercase__ ): _lowerCamelCase : Optional[Any] = 0 else: translated.append(lowercase__ ) return "".join(lowercase__ ) if __name__ == "__main__": main()
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def _snake_case ( lowercase__ ): return 1 / (1 + np.exp(-z )) def _snake_case ( lowercase__ , lowercase__ ): return (-y * np.log(lowercase__ ) - (1 - y) * np.log(1 - h )).mean() def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : int = np.dot(lowercase__ , lowercase__ ) return np.sum(y * scores - np.log(1 + np.exp(lowercase__ ) ) ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=70000 ): _lowerCamelCase : int = np.zeros(x.shape[1] ) for iterations in range(lowercase__ ): _lowerCamelCase : Dict = np.dot(lowercase__ , lowercase__ ) _lowerCamelCase : List[Any] = sigmoid_function(lowercase__ ) _lowerCamelCase : Optional[int] = np.dot(x.T , h - y ) / y.size _lowerCamelCase : Any = theta - alpha * gradient # updating the weights _lowerCamelCase : int = np.dot(lowercase__ , lowercase__ ) _lowerCamelCase : Any = sigmoid_function(lowercase__ ) _lowerCamelCase : str = cost_function(lowercase__ , lowercase__ ) if iterations % 100 == 0: print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": lowercase__ = datasets.load_iris() lowercase__ = iris.data[:, :2] lowercase__ = (iris.target != 0) * 1 lowercase__ = 0.1 lowercase__ = logistic_reg(alpha, x, y, max_iterations=7_0000) print("""theta: """, theta) # printing the theta i.e our weights vector def _snake_case ( lowercase__ ): return sigmoid_function( np.dot(lowercase__ , lowercase__ ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""") plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""") ((lowercase__) , (lowercase__)) = (x[:, 0].min(), x[:, 0].max()) ((lowercase__) , (lowercase__)) = (x[:, 1].min(), x[:, 1].max()) ((lowercase__) , (lowercase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) lowercase__ = np.c_[xxa.ravel(), xxa.ravel()] lowercase__ = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""") plt.legend() plt.show()
368
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase ): _lowerCamelCase : Dict = question_encoder _lowerCamelCase : List[Any] = generator _lowerCamelCase : Optional[Any] = self.question_encoder def A_ ( self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' ) _lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase ) if config is None: _lowerCamelCase : int = RagConfig.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__( self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.question_encoder def A_ ( self ): _lowerCamelCase : Optional[Any] = self.generator def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , lowercase , ) if max_length is None: _lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length _lowerCamelCase : Optional[Any] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _lowerCamelCase : int = self.current_tokenizer.model_max_length _lowerCamelCase : str = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) _lowerCamelCase : int = labels['input_ids'] return model_inputs
12
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" def _snake_case ( lowercase__ = 10 ): if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError('Invalid input' ) _lowerCamelCase : str = 10**n _lowerCamelCase : Union[str, Any] = 28433 * (pow(2 , 7830457 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
"""simple docstring""" def _snake_case ( lowercase__ ): if any(not isinstance(lowercase__ , lowercase__ ) or x < 0 for x in sequence ): raise TypeError('Sequence must be list of non-negative integers' ) for _ in range(len(lowercase__ ) ): for i, (rod_upper, rod_lower) in enumerate(zip(lowercase__ , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
"""simple docstring""" import argparse import datetime def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } _lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month _lowerCamelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) _lowerCamelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day _lowerCamelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator _lowerCamelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year _lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation _lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) ) # Start math if m <= 2: _lowerCamelCase : str = y - 1 _lowerCamelCase : Tuple = m + 12 # maths var _lowerCamelCase : int = int(str(lowercase__ )[:2] ) _lowerCamelCase : int = int(str(lowercase__ )[2:] ) _lowerCamelCase : int = int(2.6 * m - 5.3_9 ) _lowerCamelCase : int = int(c / 4 ) _lowerCamelCase : int = int(k / 4 ) _lowerCamelCase : int = int(d + k ) _lowerCamelCase : int = int(t + u + v + x ) _lowerCamelCase : int = int(z - (2 * c) ) _lowerCamelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' ) # Response _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowercase__ = parser.parse_args() zeller(args.date_input)
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs


img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
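# The last test above exercises local binary patterns. A minimal sketch of the
# idea, written from scratch rather than taken from the module under test:
# threshold each of the 8 neighbours against the centre pixel and pack the
# comparison bits into one byte. The bit ordering here is one common choice,
# not necessarily the one the library uses.
def lbp_value(image, x, y):
    center = image[x][y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        # Out-of-bounds neighbours contribute 0.
        if 0 <= nx < len(image) and 0 <= ny < len(image[0]) and image[nx][ny] >= center:
            value += 1 << bit
    return value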
"""simple docstring""" import re def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' ) if match := re.search(lowercase__ , lowercase__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int]=3 , __magic_name__ : List[Any]=32 , __magic_name__ : int=3 , __magic_name__ : Union[str, Any]=10 , __magic_name__ : Optional[int]=[10, 20, 30, 40] , __magic_name__ : Union[str, Any]=[1, 1, 2, 1] , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[str]="relu" , __magic_name__ : Optional[int]=3 , __magic_name__ : Optional[int]=None , ) -> Optional[int]: """simple docstring""" __snake_case : Dict = parent __snake_case : List[str] = batch_size __snake_case : Dict = image_size __snake_case : Tuple = num_channels __snake_case : int = embeddings_size __snake_case : Optional[int] = hidden_sizes __snake_case : Any = depths __snake_case : Optional[int] = is_training __snake_case : Optional[int] = use_labels __snake_case : Tuple = hidden_act __snake_case : Dict = num_labels __snake_case : int = scope __snake_case : Optional[int] = len(__magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" __snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : str = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : Dict = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Tuple ) -> List[Any]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowercase__ ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : List[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Union[str, Any] = RegNetModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Any = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> List[Any]: """simple docstring""" __snake_case : List[str] = self.num_labels __snake_case : List[str] = RegNetForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = 
self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Optional[int] = config_and_inputs __snake_case : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () lowercase__: Dict = ( {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification} if is_torch_available() else {} ) lowercase__: List[Any] = False lowercase__: int = False lowercase__: Tuple = False lowercase__: Optional[int] = False def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = RegNetModelTester(self ) __snake_case : Optional[int] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def lowercase__ ( self : int ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Any = model_class(__magic_name__ ) __snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Dict = [*signature.parameters.keys()] __snake_case : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : Tuple ) -> List[Any]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = model_class(config=__magic_name__ ) for name, module in model.named_modules(): if isinstance(__magic_name__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str ): __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Tuple = 
model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Any = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : int = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: __snake_case : Union[str, Any] = layer_type __snake_case : Any = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : List[str] = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def lowercase__ ( self : Dict ) -> str: """simple docstring""" for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = RegNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase__ ( self : str ) -> Any: """simple docstring""" __snake_case : List[str] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ ) __snake_case : Optional[Any] = self.default_image_processor __snake_case : int = prepare_img() __snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : List[Any] = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
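# For reference, the inference path the @slow integration test above drives
# looks roughly like the following. "facebook/regnet-y-040" is my assumption
# for the first entry of REGNET_PRETRAINED_MODEL_ARCHIVE_LIST; it is not stated
# in the test file itself.
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])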
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _A : @staticmethod def lowercase__ ( *__magic_name__ : str , **__magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" pass @is_pipeline_test @require_vision class _A ( unittest.TestCase ): @require_torch def lowercase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __snake_case : Dict = pipeline( model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , ) __snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __snake_case : Optional[Any] = image_classifier(__magic_name__ , candidate_labels=["""a""", """b""", """c"""] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__magic_name__ ) , [ [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}], [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}], ] , ) __snake_case : Optional[int] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 ) self.assertEqual( nested_simplify(__magic_name__ ) , [ [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], ] , ) @require_tf def lowercase__ ( self : int ) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = pipeline( model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" ) __snake_case : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __snake_case : Tuple = image_classifier(__magic_name__ , candidate_labels=["""a""", """b""", """c"""] ) self.assertEqual( nested_simplify(__magic_name__ ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , ) __snake_case : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 ) self.assertEqual( nested_simplify(__magic_name__ ) , [ [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, 
{"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], [ {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, {"""score""": 0.333, """label""": ANY(__magic_name__ )}, ], ] , ) @slow @require_torch def lowercase__ ( self : Optional[Any] ) -> Any: """simple docstring""" __snake_case : Optional[Any] = pipeline( task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , ) # This is an image of 2 cats with remotes and no planes __snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __snake_case : Optional[Any] = image_classifier(__magic_name__ , candidate_labels=["""cat""", """plane""", """remote"""] ) self.assertEqual( nested_simplify(__magic_name__ ) , [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ] , ) __snake_case : Optional[Any] = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 ) self.assertEqual( nested_simplify(__magic_name__ ) , [ [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ] * 5 , ) @slow @require_tf def lowercase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __snake_case : Tuple = pipeline( task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" ) # This is an image of 2 cats with remotes and no planes __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __snake_case : Dict = image_classifier(__magic_name__ , candidate_labels=["""cat""", """plane""", """remote"""] ) self.assertEqual( nested_simplify(__magic_name__ ) , [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ] , ) __snake_case : int = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 ) self.assertEqual( nested_simplify(__magic_name__ ) , [ [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ] * 5 , )
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
'''simple docstring'''
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
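# A hedged usage sketch for the pipeline class above. "google/owlvit-base-patch32"
# is my assumption for a compatible checkpoint (OWL-ViT is the usual backbone for
# this task); the image URL and labels are likewise only examples.
from transformers import pipeline

detector = pipeline(
    task="zero-shot-object-detection",
    model="google/owlvit-base-patch32",
)
detections = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for det in detections:
    # box is {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...} in pixels
    print(det["label"], det["score"], det["box"])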
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
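# Illustrative sketch (not part of the test suite above): TimeSformer folds the time
# axis into the batch axis, so each attention map covers one frame's patches plus a
# CLS token. The helper below merely restates the shape arithmetic the attention
# checks above assert; the function name is hypothetical.
def expected_timesformer_attention_shape(batch_size, num_frames, num_heads, seq_length):
    patches_per_frame = seq_length // num_frames  # spatial tokens from a single frame
    tokens = patches_per_frame + 1  # +1 for the CLS token prepended to each frame
    return (batch_size * num_frames, num_heads, tokens, tokens)


assert expected_timesformer_attention_shape(2, 2, 4, 16) == (4, 4, 9, 9)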
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _A ( __lowercase ): lowercase__: Tuple = '''gpt_neo''' lowercase__: int = ['''past_key_values'''] lowercase__: str = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Optional[Any] , __magic_name__ : str=5_02_57 , __magic_name__ : Optional[Any]=20_48 , __magic_name__ : Optional[Any]=20_48 , __magic_name__ : Optional[int]=24 , __magic_name__ : Union[str, Any]=[[["global", "local"], 12]] , __magic_name__ : Any=16 , __magic_name__ : Any=None , __magic_name__ : Optional[int]=2_56 , __magic_name__ : List[Any]="gelu_new" , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : List[str]=0.0 , __magic_name__ : Any=0.1 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : Any=0.02 , __magic_name__ : Tuple=True , __magic_name__ : int=5_02_56 , __magic_name__ : Optional[Any]=5_02_56 , **__magic_name__ : Union[str, Any] , ) -> List[Any]: """simple docstring""" __snake_case : List[Any] = vocab_size __snake_case : List[str] = max_position_embeddings __snake_case : List[str] = hidden_size __snake_case : Optional[Any] = num_layers __snake_case : Optional[int] = num_heads __snake_case : int = intermediate_size __snake_case : Any = window_size __snake_case : Any = activation_function __snake_case : Dict = resid_dropout __snake_case : Union[str, Any] = embed_dropout __snake_case : Optional[Any] = attention_dropout __snake_case : Optional[int] = classifier_dropout __snake_case : Any = layer_norm_epsilon __snake_case : List[str] = initializer_range __snake_case : Any = use_cache __snake_case : Dict = bos_token_id __snake_case : Optional[int] = eos_token_id __snake_case : str = attention_types __snake_case : str = self.expand_attention_types_params(__magic_name__ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.attention_layers)` == `config.num_layers` """ f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' """`config.attention_layers` is prepared using `config.attention_types`. 
""" """Please verify the value of `config.attention_types` argument.""" ) super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) @staticmethod def lowercase__ ( __magic_name__ : str ) -> List[str]: """simple docstring""" __snake_case : Optional[Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" import torch __snake_case : List[Any] = input.size() __snake_case : Optional[Any] = len(_lowerCamelCase ) __snake_case : Tuple = shape[dimension] __snake_case : Union[str, Any] = torch.arange(0 , _lowerCamelCase , _lowerCamelCase ) __snake_case : Optional[Any] = torch.div(sizedim - size , _lowerCamelCase , rounding_mode="""floor""" ) + 1 __snake_case : Any = torch.arange(_lowerCamelCase ) + low_indices[:min_length][:, None] __snake_case : Dict = [slice(_lowerCamelCase )] * rank __snake_case : Optional[int] = indices __snake_case : List[Any] = input[s] __snake_case : List[Any] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" import torch __snake_case : Dict = torch.arange(1 , _lowerCamelCase ) __snake_case : Optional[Any] = torch.remainder(_lowerCamelCase , _lowerCamelCase ) __snake_case : List[str] = remainders == 0 __snake_case : int = candidates[divisor_indices] __snake_case : Optional[Any] = torch.max(_lowerCamelCase ) return largest_divisor, torch.div(_lowerCamelCase , _lowerCamelCase , rounding_mode="""floor""" ) class _A ( __lowercase ): @property def lowercase__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : List[Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : List[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Dict = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Optional[int] ) -> int: """simple docstring""" return self._config.num_heads def lowercase__ ( self : List[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : int = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : Optional[Any] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : List[str] = seqlen + 2 __snake_case : Optional[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : Optional[int] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : 
List[Any] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : str = ordered_inputs["""attention_mask"""].dtype __snake_case : Tuple = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return 13
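# Standalone restatement, for illustration only, of what `expand_attention_types_params`
# above computes: the nested `attention_types` shorthand is unrolled into one attention
# kind per layer. With the default `[[["global", "local"], 12]]`, the
# ["global", "local"] pattern repeats 12 times, matching a 24-layer model.
def expand_attention_types(attention_types):
    layers = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            layers.extend(pattern)
    return layers


assert expand_attention_types([[["global", "local"], 12]]) == ["global", "local"] * 12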
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
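# A minimal, generic stand-in for the lazy-import pattern used by this __init__ module
# (and the ViT one below). This is NOT the actual `transformers.utils._LazyModule`; it
# only sketches the idea, assuming each exported symbol maps to exactly one submodule:
# the submodule is imported on first attribute access and the result is cached.
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # only reached when `symbol` is not yet an attribute of the module
        try:
            module_name = self._symbol_to_module[symbol]
        except KeyError:
            raise AttributeError(symbol)
        submodule = importlib.import_module("." + module_name, self.__name__)
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so the import happens only once
        return value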
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions by divide and conquer in O(n log n).

    Returns the sorted array together with the inversion count.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (p[i], q[j]) with p[i] > q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # If P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions; the claim follows from P being sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this array has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversions (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
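# Quick usage sketch for the two counters above: the divide-and-conquer version also
# returns the sorted array, so both results can be cross-checked against the O(n^2)
# brute force on random input.
import random

sample = [random.randrange(100) for _ in range(50)]
sorted_sample, fast_count = count_inversions_recursive(sample)
assert fast_count == count_inversions_bf(sample)
assert sorted_sample == sorted(sample)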
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only anchor tags carry the links we want.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # Keep href values that are neither empty nor bare fragments.
                if name == "href" and value != "#" and value != "":
                    # Skip URLs already collected.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the registrable domain, e.g. 'github.com' for 'sub.github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location of the URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL and pass the raw HTML to the parser to collect links.
        r = requests.get(url)
        parser.feed(r.text)

        # Visit each collected link and harvest addresses found there.
        valid_emails = set()
        for link in parser.urls:
            try:
                read = requests.get(link)
                # Match simple local-part@domain addresses.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # Add each address; the set removes duplicates.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
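# Offline illustration of the address-matching step in `emails_from_url` above,
# isolated from the network calls. It uses the same simple local-part@domain
# heuristic; escaping the dot (as below) is a small robustness tweak, and real
# RFC 5322 address grammar is far more permissive than this regex.
import re

page_text = "Contact [email protected] or [email protected] for details."
matches = re.findall(r"[a-zA-Z0-9]+@" + re.escape("github.com"), page_text)
assert matches == ["[email protected]"]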
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
import math
import sys


def minimum_number_of_squares(number: int) -> int:
    """Return the least count of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))  # sqrt of the current target, so i - j**2 stays >= 0
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
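# Worked examples for the function above: 12 = 4 + 4 + 4 needs three squares, while
# 13 = 4 + 9 needs only two. Lagrange's four-square theorem guarantees the answer
# never exceeds 4.
assert minimum_number_of_squares(12) == 3
assert minimum_number_of_squares(13) == 2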
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _a ( _lowerCamelCase ) -> int: """simple docstring""" __snake_case , __snake_case : Optional[int] = image.size __snake_case , __snake_case : Union[str, Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __snake_case : List[Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) __snake_case : int = np.array(_lowerCamelCase ).astype(np.floataa ) / 2_55.0 __snake_case : Tuple = image[None].transpose(0 , 3 , 1 , 2 ) __snake_case : Optional[Any] = torch.from_numpy(_lowerCamelCase ) return 2.0 * image - 1.0 class _A ( __lowercase ): def __init__( self : Any , __magic_name__ : VQModel , __magic_name__ : UNetaDModel , __magic_name__ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ) -> str: """simple docstring""" super().__init__() self.register_modules(vqvae=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ ) @torch.no_grad() def __call__( self : Optional[int] , __magic_name__ : Union[torch.Tensor, PIL.Image.Image] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : Optional[int] = 1_00 , __magic_name__ : Optional[float] = 0.0 , __magic_name__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" if isinstance(__magic_name__ , PIL.Image.Image ): __snake_case : Union[str, Any] = 1 elif isinstance(__magic_name__ , torch.Tensor ): __snake_case : List[Any] = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__magic_name__ )}''' ) if isinstance(__magic_name__ , PIL.Image.Image ): __snake_case : List[str] = preprocess(__magic_name__ ) __snake_case , __snake_case : int = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __snake_case : List[str] = (batch_size, self.unet.config.in_channels // 2, height, width) __snake_case : Union[str, Any] = next(self.unet.parameters() ).dtype __snake_case : Optional[Any] = randn_tensor(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ ) __snake_case : Tuple = image.to(device=self.device , dtype=__magic_name__ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(__magic_name__ , device=self.device ) __snake_case : List[Any] = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __snake_case : int = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case : str = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __snake_case : Optional[int] = {} if accepts_eta: __snake_case : Tuple = eta for t in self.progress_bar(__magic_name__ ): # concat latents and low resolution image in the channel dimension. __snake_case : int = torch.cat([latents, image] , dim=1 ) __snake_case : List[str] = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ ) # predict the noise residual __snake_case : Optional[Any] = self.unet(__magic_name__ , __magic_name__ ).sample # compute the previous noisy sample x_t -> x_t-1 __snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample # decode the image latents with the VQVAE __snake_case : Any = self.vqvae.decode(__magic_name__ ).sample __snake_case : Union[str, Any] = torch.clamp(__magic_name__ , -1.0 , 1.0 ) __snake_case : Any = image / 2 + 0.5 __snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __snake_case : Optional[int] = self.numpy_to_pil(__magic_name__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=__magic_name__ )
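# Self-contained restatement of the image-preprocessing helper at the top of the
# pipeline above: snap the image to a multiple of 32 per side (the U-Net downsampling
# requirement), then map pixels from [0, 255] to the [-1, 1] range the VQ-VAE expects.
# `PIL.Image.LANCZOS` is assumed here as the equivalent of PIL_INTERPOLATION["lanczos"].
import numpy as np
import PIL.Image
import torch


def preprocess_image(image: PIL.Image.Image) -> torch.Tensor:
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # round both sides down to a multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    array = np.array(image).astype(np.float32) / 255.0
    tensor = torch.from_numpy(array[None].transpose(0, 3, 1, 2))  # HWC -> NCHW
    return 2.0 * tensor - 1.0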
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class _A ( __lowercase ): lowercase__: str = '''codegen''' lowercase__: Optional[int] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int: """simple docstring""" __snake_case : List[str] = vocab_size __snake_case : Union[str, Any] = n_ctx __snake_case : int = n_positions __snake_case : str = n_embd __snake_case : Dict = n_layer __snake_case : List[Any] = n_head __snake_case : Any = n_inner __snake_case : str = rotary_dim __snake_case : List[str] = activation_function __snake_case : Tuple = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Tuple = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : Union[str, Any] = eos_token_id super().__init__( bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ ) class _A ( __lowercase ): def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , 
__magic_name__ : bool = False , ) -> Tuple: """simple docstring""" super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ ) if not getattr(self._config , """pad_token_id""" , __magic_name__ ): # TODO: how to do that better? __snake_case : List[str] = 0 @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return self._config.n_layer @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : Optional[int] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype __snake_case : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
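# Illustrative computation of the dummy `past_key_values` built in
# `generate_dummy_inputs` above: one (key, value) pair of zeros per layer, each of
# shape (batch, num_heads, past_length, head_dim). The concrete numbers below are
# assumptions chosen for the sketch, not CodeGen defaults.
import torch

batch, num_heads, hidden_size, num_layers, seqlen = 2, 16, 1024, 4, 7
past_length = seqlen + 2  # deliberately different from the input length, as above
shape = (batch, num_heads, past_length, hidden_size // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == (2, 16, 9, 64)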
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __UpperCamelCase = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ) -> List[str]: """simple docstring""" if attention_mask is None: __snake_case : int = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __snake_case : Tuple = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __snake_case : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __snake_case : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __snake_case : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _A : def __init__( self : Tuple , __magic_name__ : Tuple , __magic_name__ : Tuple=13 , __magic_name__ : Any=7 , __magic_name__ : List[str]=True , __magic_name__ : Dict=False , __magic_name__ : Union[str, Any]=99 , __magic_name__ : List[str]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Dict=4 , __magic_name__ : int=4 , __magic_name__ : List[str]="gelu" , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Any=32 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[int]=0 , __magic_name__ : Optional[int]=0.02 , ) -> int: """simple docstring""" __snake_case : Optional[int] = parent __snake_case : str = batch_size __snake_case : Optional[Any] = seq_length __snake_case : List[Any] = is_training __snake_case : List[Any] = use_labels __snake_case : int = vocab_size __snake_case : Tuple = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Tuple = num_attention_heads __snake_case : Any = intermediate_size __snake_case : Optional[Any] = hidden_act __snake_case : List[str] = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Optional[Any] = max_position_embeddings __snake_case : Optional[Any] = eos_token_id __snake_case : Optional[Any] = pad_token_id __snake_case : Tuple = bos_token_id __snake_case : int = initializer_range def lowercase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" __snake_case : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __snake_case : Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) 
, -1 ) __snake_case : List[Any] = shift_tokens_right(__magic_name__ , 1 , 2 ) __snake_case : Optional[Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__magic_name__ , ) __snake_case : List[str] = prepare_blenderbot_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ ) return config, inputs_dict def lowercase__ ( self : Any ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : Optional[Any] = self.prepare_config_and_inputs() return config, inputs_dict def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : int ) -> int: """simple docstring""" __snake_case : Union[str, Any] = 20 __snake_case : List[str] = model_class_name(__magic_name__ ) __snake_case : int = model.encode(inputs_dict["""input_ids"""] ) __snake_case , __snake_case : Optional[Any] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __snake_case : Dict = model.init_cache(decoder_input_ids.shape[0] , __magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __snake_case : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __snake_case : Optional[int] = model.decode( decoder_input_ids[:, :-1] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , decoder_position_ids=__magic_name__ , ) __snake_case : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __snake_case : Tuple = model.decode( decoder_input_ids[:, -1:] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__magic_name__ , ) __snake_case : str = model.decode(__magic_name__ , __magic_name__ ) __snake_case : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] ) -> int: """simple docstring""" __snake_case : Any = 20 __snake_case : Union[str, Any] = model_class_name(__magic_name__ ) __snake_case : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __snake_case , __snake_case : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __snake_case : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __snake_case : List[Any] = model.init_cache(decoder_input_ids.shape[0] , __magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], 
decoder_input_ids.shape[-1] - 1) , ) __snake_case : Any = model.decode( decoder_input_ids[:, :-1] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , decoder_position_ids=__magic_name__ , ) __snake_case : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __snake_case : List[Any] = model.decode( decoder_input_ids[:, -1:] , __magic_name__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__magic_name__ , decoder_position_ids=__magic_name__ , ) __snake_case : Dict = model.decode(__magic_name__ , __magic_name__ , decoder_attention_mask=__magic_name__ ) __snake_case : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class _A ( unittest.TestCase ): lowercase__: Dict = 99 def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" __snake_case : str = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __snake_case : Optional[int] = input_ids.shape[0] __snake_case : Optional[Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case , __snake_case , __snake_case : Optional[int] = self._get_config_and_data() __snake_case : int = FlaxBlenderbotSmallForConditionalGeneration(__magic_name__ ) __snake_case : Tuple = lm_model(input_ids=__magic_name__ ) __snake_case : Tuple = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : List[str] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __snake_case : str = FlaxBlenderbotSmallForConditionalGeneration(__magic_name__ ) __snake_case : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __snake_case : List[str] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __snake_case : Dict = lm_model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ) __snake_case : Dict = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , __magic_name__ ) def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : str = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __snake_case : Union[str, Any] = shift_tokens_right(__magic_name__ , 1 , 2 ) __snake_case : List[str] = np.equal(__magic_name__ , 1 ).astype(np.floataa ).sum() __snake_case : Dict = np.equal(__magic_name__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape 
) self.assertEqual(__magic_name__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _A ( __lowercase , unittest.TestCase , __lowercase ): lowercase__: str = True lowercase__: List[Any] = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) lowercase__: Tuple = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def lowercase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = FlaxBlenderbotSmallModelTester(self ) def lowercase__ ( self : Dict ) -> Any: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> str: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __snake_case : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = model_class(__magic_name__ ) @jax.jit def encode_jitted(__magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any]=None , **__magic_name__ : Optional[Any] ): return model.encode(input_ids=__magic_name__ , attention_mask=__magic_name__ ) with self.subTest("""JIT Enabled""" ): __snake_case : Dict = encode_jitted(**__magic_name__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __snake_case : str = encode_jitted(**__magic_name__ ).to_tuple() self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for jitted_output, output in zip(__magic_name__ , __magic_name__ ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __snake_case : List[Any] = model_class(__magic_name__ ) __snake_case : List[Any] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __snake_case : str = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__magic_name__ : Any , __magic_name__ : str , __magic_name__ : str ): return model.decode( decoder_input_ids=__magic_name__ , decoder_attention_mask=__magic_name__ , encoder_outputs=__magic_name__ , ) with self.subTest("""JIT Enabled""" ): __snake_case : List[Any] = decode_jitted(**__magic_name__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __snake_case : Optional[Any] = decode_jitted(**__magic_name__ ).to_tuple() self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for jitted_output, output in zip(__magic_name__ , __magic_name__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase__ ( self : Optional[Any] ) -> str: """simple docstring""" for 
model_class_name in self.all_model_classes: __snake_case : Dict = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __snake_case : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id __snake_case : Any = model(__magic_name__ ) self.assertIsNotNone(__magic_name__ )
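# A minimal reference implementation of the `shift_tokens_right` helper that the
# tests above exercise (an editor's sketch consistent with their assertions, not
# necessarily the library's exact source): the decoder input is the target
# sequence shifted one position right, with the decoder-start token prepended.
import numpy as np


def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # labels marked -100 (ignored by the loss) become padding in the decoder input
    return np.where(shifted == -100, pad_token_id, shifted)


# Mirrors the test data above: the trailing token of each row falls off, so the
# pad count drops by exactly one, and column 0 becomes the start token (2).
_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
_shifted = shift_tokens_right_sketch(_ids, pad_token_id=1, decoder_start_token_id=2)
assert (_shifted[:, 0] == 2).all()
assert (_shifted == 1).sum() == (_ids == 1).sum() - 1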
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyImgaImgPipeline lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] lowercase__: int = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowercase__: List[Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: Any = False @property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return 32 @property def lowercase__ ( self : str ) -> str: """simple docstring""" return 32 @property def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __snake_case : Tuple = MultilingualCLIP(__magic_name__ ) __snake_case : Optional[Any] = text_encoder.eval() return text_encoder @property def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return { "block_out_channels": [32, 64], 
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Tuple = self.dummy_text_encoder __snake_case : Dict = self.dummy_tokenizer __snake_case : Dict = self.dummy_unet __snake_case : int = self.dummy_movq __snake_case : List[Any] = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case : Dict = DDIMScheduler(**__magic_name__ ) __snake_case : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str: """simple docstring""" __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Dict = """cpu""" __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : Any = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' 
expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __snake_case : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __snake_case : List[Any] = """A red cartoon frog, 4k""" __snake_case : str = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __snake_case : Any = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case , __snake_case : Optional[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : List[str] = pipeline( __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) __snake_case : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
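# How an img2img `strength` maps onto the sampling schedule, in isolation (an
# editor's sketch of the usual diffusers convention; the helper name is
# illustrative, not the pipeline's internals): only the final `strength`
# fraction of the requested steps is run, so strength=0.2 lightly noises the
# init image and denoises over the last 20% of the schedule.
def img2img_step_window(num_inference_steps, strength):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps


assert img2img_step_window(100, 0.2) == (80, 100)  # as in the slow test above
assert img2img_step_window(10, 0.2) == (8, 10)     # as in the fast test above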
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class _A ( unittest.TestCase ): def __init__( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any]=13 , __magic_name__ : List[Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Dict=99 , __magic_name__ : List[str]=32 , __magic_name__ : Optional[int]=5 , __magic_name__ : Tuple=4 , __magic_name__ : Dict=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : List[str]=16 , __magic_name__ : Tuple=2 , __magic_name__ : List[str]=0.02 , __magic_name__ : List[Any]=4 , ) -> Optional[int]: """simple docstring""" __snake_case : Dict = parent __snake_case : Union[str, Any] = batch_size __snake_case : Union[str, Any] = seq_length __snake_case : int = is_training __snake_case : List[str] = use_attention_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : int = vocab_size __snake_case : Optional[Any] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Any = hidden_act __snake_case : List[str] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : Optional[Any] = initializer_range __snake_case : Optional[int] = num_choices def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Optional[Any] = None if self.use_attention_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : int = None if self.use_token_type_ids: __snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[int] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = config_and_inputs __snake_case : Optional[int] = {"""input_ids""": input_ids, 
"""token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class _A ( __lowercase , unittest.TestCase ): lowercase__: Optional[Any] = True lowercase__: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : int = FlaxRoFormerModelTester(self ) @slow def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: __snake_case : List[str] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ ) __snake_case : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class _A ( unittest.TestCase ): @slow def lowercase__ ( self : Any ) -> Dict: """simple docstring""" __snake_case : List[str] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __snake_case : List[Any] = jnp.array([[0, 1, 2, 3, 4, 5]] ) __snake_case : Tuple = model(__magic_name__ )[0] __snake_case : Any = 5_00_00 __snake_case : Tuple = (1, 6, vocab_size) self.assertEqual(output.shape , __magic_name__ ) __snake_case : Tuple = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __UpperCamelCase = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __UpperCamelCase = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } class _A ( __lowercase ): lowercase__: Any = VOCAB_FILES_NAMES lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask'''] lowercase__: List[str] = BartTokenizer def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]: """simple docstring""" super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , 
errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , ) __snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) ) __snake_case : str = add_prefix_space __snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ ) __snake_case : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __snake_case : Any = """post_processor""" __snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) if tokenizer_component_instance: __snake_case : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __snake_case : Tuple = tuple(state["""sep"""] ) if "cls" in state: __snake_case : int = tuple(state["""cls"""] ) __snake_case : Optional[int] = False if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : Optional[Any] = add_prefix_space __snake_case : List[str] = True if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets: __snake_case : Optional[int] = trim_offsets __snake_case : Any = True if changes_to_apply: __snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) ) __snake_case : List[Any] = component_class(**__magic_name__ ) setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) @property def lowercase__ ( self : List[Any] ) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value __snake_case : Union[str, Any] = value def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding: """simple docstring""" __snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding: """simple docstring""" __snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) 
return tuple(__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Optional[int] = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
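# The special-token layout the two methods above produce, spelled out with
# placeholder ids (bos=0, eos=2, as in BART): a single sequence becomes
# `<s> A </s>`, a pair becomes `<s> A </s></s> B </s>`, and the token type ids
# are all zeros because BART does not use segment embeddings.
bos, eos = 0, 2
ids_a, ids_b = [10, 11], [20]
single = [bos] + ids_a + [eos]
pair = single + [eos] + ids_b + [eos]
assert single == [0, 10, 11, 2]
assert pair == [0, 10, 11, 2, 2, 20, 2]
token_type_ids = [0] * len(pair)  # always zeros for BART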
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: non-positive -> background, positive -> mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
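# The decode step above in isolation: logits are binarized (non-positive ->
# background, positive -> mask) and scaled to an 8-bit image (an editor's demo
# with toy values, independent of the model).
import numpy as np

_logits = np.array([[-1.3, 0.2], [2.1, -0.4]])
_mask = np.where(_logits > 0, 1, 0)
_pixels = (_mask * 255).astype(np.uint8)
assert _pixels.tolist() == [[0, 255], [255, 0]]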
'''simple docstring''' import os import numpy import onnx def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Optional[int] = a.name __snake_case : Dict = b.name __snake_case : Optional[int] = """""" __snake_case : int = """""" __snake_case : Any = a == b __snake_case : List[Any] = name_a __snake_case : List[str] = name_b return res def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_lowerCamelCase , _lowerCamelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) _graph_replace_input_with(node_proto.attribute[1].g , _lowerCamelCase , _lowerCamelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : Dict = list(model.graph.initializer ) __snake_case : List[Any] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __snake_case : Tuple = inits[i].name __snake_case : Tuple = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : str = os.path.dirname(_lowerCamelCase ) __snake_case : Dict = os.path.basename(_lowerCamelCase ) __snake_case : Union[str, Any] = onnx.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) ) __snake_case : Dict = list(model.graph.initializer ) __snake_case : Optional[int] = set() __snake_case : Optional[Any] = {} __snake_case : Tuple = [] __snake_case : List[Any] = 0 for i in range(len(_lowerCamelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(_lowerCamelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_lowerCamelCase ) dup_set.add(_lowerCamelCase ) __snake_case : List[Any] = inits[j].data_type __snake_case : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , _lowerCamelCase ) total_reduced_size += mem_size __snake_case : Any = inits[i].name __snake_case : Any = inits[j].name if name_i in dup_map: dup_map[name_i].append(_lowerCamelCase ) else: __snake_case : Dict = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) __snake_case : int = sorted(_lowerCamelCase ) _remove_dup_initializers_from_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) __snake_case : str = """optimized_""" + model_file_name __snake_case : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase ) onnx.save(_lowerCamelCase , _lowerCamelCase ) return new_model
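# The byte accounting used above, restated standalone (ONNX TensorProto dtype
# codes: 1 = FLOAT and 6 = INT32 are 4 bytes per element, 7 = INT64 and
# 11 = DOUBLE are 8): dropping one duplicated fp32 initializer reclaims
# prod(dims) * 4 bytes.
import numpy as np

BYTES_PER_ELEM = {1: 4, 6: 4, 7: 8, 11: 8}
dims, dtype = (1024, 1024), 1  # one duplicated 1024x1024 fp32 matrix
saved_bytes = int(np.prod(dims)) * BYTES_PER_ELEM[dtype]
assert saved_bytes == 4 * 2**20  # 4 MiB reclaimed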
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
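# The predicate under test, restated standalone (an editor's sketch of the
# documented behavior, not the library source): a dataset counts as "small"
# only when both sizes are truthy and the dataset fits under the in-memory cap.
def is_small_sketch(dataset_size, in_memory_max_size):
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)


assert is_small_sketch(400 * 2**20, 900 * 2**20) is True   # fits under the cap
assert is_small_sketch(600 * 2**20, 100 * 2**20) is False  # exceeds the cap
assert is_small_sketch(600 * 2**20, 0) is False            # cap disabled
assert is_small_sketch(None, 900 * 2**20) is False         # unknown size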
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
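# The state-dict surgery above, in isolation: DialoGPT checkpoints store the
# tied LM head under `lm_head.decoder.weight`, which transformers expects as
# `lm_head.weight` (toy dict; no real checkpoint needed).
state_dict = {"lm_head.decoder.weight": "W", "transformer.h.0.attn.weight": "x"}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state_dict
assert state_dict["lm_head.weight"] == "W"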
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple=13 , __magic_name__ : Optional[Any]=10 , __magic_name__ : Any=3 , __magic_name__ : str=2 , __magic_name__ : Tuple=2 , __magic_name__ : int=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Dict=True , __magic_name__ : str=32 , __magic_name__ : str=5 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Any=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Dict=10 , __magic_name__ : Any=0.02 , __magic_name__ : Optional[Any]=0.9 , __magic_name__ : str=None , ) -> Dict: """simple docstring""" __snake_case : Optional[int] = parent __snake_case : List[Any] = batch_size __snake_case : Dict = image_size __snake_case : Union[str, Any] = num_channels __snake_case : Optional[Any] = patch_size __snake_case : List[str] = tubelet_size __snake_case : Union[str, Any] = num_frames __snake_case : List[str] = is_training __snake_case : str = use_labels __snake_case : str = hidden_size __snake_case : Dict = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : str = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Any = mask_ratio __snake_case : List[str] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame __snake_case : Optional[Any] = (image_size // patch_size) ** 2 __snake_case : List[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos __snake_case : Union[str, Any] = int(mask_ratio * self.seq_length ) def lowercase__ ( self : Any ) -> Tuple: """simple docstring""" __snake_case : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : List[Any] = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def lowercase__ ( self : Any , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Any ) -> Optional[int]: """simple docstring""" __snake_case : str = VideoMAEModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = VideoMAEForPreTraining(__magic_name__ ) model.to(__magic_name__ ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __snake_case : Tuple = torch.ones((self.num_masks,) ) __snake_case : Tuple = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) __snake_case : Any = mask.expand(self.batch_size , -1 ).bool() __snake_case : int = model(__magic_name__ , __magic_name__ ) # model only returns predictions for masked patches __snake_case : Tuple = mask.sum().item() __snake_case : Union[str, Any] = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def lowercase__ ( self : str ) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : List[str] = config_and_inputs __snake_case : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowercase__: int = ( {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification} if is_torch_available() else {} ) lowercase__: int = False lowercase__: Optional[int] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Tuple = VideoMAEModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int=False ) -> Tuple: """simple docstring""" __snake_case : Tuple = copy.deepcopy(__magic_name__ ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch __snake_case : int = torch.ones((self.model_tester.num_masks,) ) __snake_case : str = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) __snake_case : Union[str, Any] = mask.expand(self.model_tester.batch_size , -1 ).bool() __snake_case : List[Any] = bool_masked_pos.to(__magic_name__ ) if return_labels: if model_class in [ *get_values(__magic_name__ ), ]: __snake_case : Dict = 
torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""VideoMAE does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> str: """simple docstring""" pass def lowercase__ ( self : str ) -> int: """simple docstring""" __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__magic_name__ ) __snake_case : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Any = [*signature.parameters.keys()] __snake_case : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__magic_name__ ) @slow def lowercase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = VideoMAEModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[str] = True for model_class in self.all_model_classes: __snake_case : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks __snake_case : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) __snake_case : Optional[int] = True __snake_case : Union[str, Any] = False __snake_case : Any = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Optional[int] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Union[str, Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Optional[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, 
seq_len, seq_len] , ) __snake_case : Any = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Tuple = True __snake_case : Union[str, Any] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : Any = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def lowercase__ ( self : List[Any] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : List[str] ): __snake_case : List[Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : List[str] = outputs.hidden_states __snake_case : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : Optional[int] = self.model_tester.seq_length - self.model_tester.num_masks __snake_case : str = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Optional[Any] = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" pass def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : Optional[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Dict ) -> List[Any]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Any ) -> List[Any]: """simple docstring""" __snake_case : int = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to( __magic_name__ ) __snake_case : Any = self.default_image_processor __snake_case : Any = prepare_video() __snake_case : int = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Union[str, Any] = model(**__magic_name__ ) # verify the logits __snake_case : Dict = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Tuple = torch.tensor([0.3669, -0.0688, -0.2421] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , 
__magic_name__ , atol=1E-4 ) ) @slow def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" __snake_case : Any = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(__magic_name__ ) __snake_case : str = self.default_image_processor __snake_case : List[str] = prepare_video() __snake_case : List[Any] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) # add boolean mask, indicating which patches to mask __snake_case : List[Any] = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) __snake_case : Any = torch.load(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : str = model(**__magic_name__ ) # verify the logits __snake_case : Tuple = torch.Size([1, 14_08, 15_36] ) __snake_case : Any = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=__magic_name__ ) self.assertEqual(outputs.logits.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) __snake_case : Union[str, Any] = torch.tensor([0.5142] , device=__magic_name__ ) self.assertTrue(torch.allclose(outputs.loss , __magic_name__ , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) __snake_case : Tuple = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=__magic_name__ ).to( __magic_name__ ) with torch.no_grad(): __snake_case : Optional[Any] = model(**__magic_name__ ) __snake_case : str = torch.tensor(torch.tensor([0.6469] ) , device=__magic_name__ ) self.assertTrue(torch.allclose(outputs.loss , __magic_name__ , atol=1E-4 ) )
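# The masking pattern the pre-training tests above rely on, in isolation: one
# shared mask with `num_masks` leading True positions, broadcast across the
# batch so every video masks the same number of patches.
import torch

seq_length, num_masks, batch_size = 8, 3, 2
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos.sum()) == batch_size * num_masks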
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
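# A worked example of the shift arithmetic above, standalone: with key "KEY",
# "HI" encrypts to "RM" because H(7) + K(10) = 17 -> "R" and
# I(8) + E(4) = 12 -> "M" (indices into LETTERS, mod 26).
_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
_key, _message = "KEY", "HI"
_cipher = "".join(
    _LETTERS[(_LETTERS.find(ch) + _LETTERS.find(_key[i % len(_key)])) % 26]
    for i, ch in enumerate(_message)
)
assert _cipher == "RM"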
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> List[Any]: """simple docstring""" __snake_case : Dict = [] for _ in range(_lowerCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> Union[str, Any]: """simple docstring""" __snake_case : str = [] for step in range(_lowerCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: __snake_case : Any = os.path.join(_lowerCamelCase , """schedule.bin""" ) torch.save(scheduler.state_dict() , _lowerCamelCase ) __snake_case : Tuple = torch.load(_lowerCamelCase ) scheduler.load_state_dict(_lowerCamelCase ) return lrs @require_torch class _A ( unittest.TestCase ): def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for a, b in zip(__magic_name__ , __magic_name__ ): self.assertAlmostEqual(__magic_name__ , __magic_name__ , delta=__magic_name__ ) def lowercase__ ( self : int ) -> Dict: """simple docstring""" __snake_case : int = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__magic_name__ ) __snake_case : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) __snake_case : Dict = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __snake_case : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_00 ): __snake_case : List[Any] = criterion(__magic_name__ , __magic_name__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowercase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __snake_case : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__magic_name__ ) __snake_case : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) __snake_case : Optional[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __snake_case : Tuple = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__magic_name__ , weight_decay=0.0 , relative_step=__magic_name__ , scale_parameter=__magic_name__ , warmup_init=__magic_name__ , ) for _ in range(10_00 ): __snake_case : Any = criterion(__magic_name__ , __magic_name__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _A ( unittest.TestCase ): lowercase__: Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None lowercase__: str = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None lowercase__: Any = 10 def lowercase__ ( self : int , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Any=None ) -> List[str]: """simple docstring""" self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for a, b in zip(__magic_name__ , __magic_name__ ): self.assertAlmostEqual(__magic_name__ , __magic_name__ , delta=__magic_name__ , msg=__magic_name__ ) def lowercase__ ( self : Dict ) -> Any: """simple docstring""" __snake_case : Union[str, Any] = {"""num_warmup_steps""": 2, """num_training_steps""": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) __snake_case : Tuple = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"""num_warmup_steps""": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, """num_cycles""": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, """power""": 2.0, """lr_end""": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"""num_warmup_steps""": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): __snake_case , __snake_case : Tuple = data __snake_case : Union[str, Any] = scheduler_func(self.optimizer , **__magic_name__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) __snake_case : Tuple = unwrap_schedule(__magic_name__ , self.num_steps ) self.assertListAlmostEqual( __magic_name__ , __magic_name__ , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) __snake_case : Optional[Any] = scheduler_func(self.optimizer , **__magic_name__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(__magic_name__ ) # wrap to test picklability of the schedule __snake_case : Any = unwrap_and_save_reload_schedule(__magic_name__ , self.num_steps ) self.assertListEqual(__magic_name__ , __magic_name__ , msg=f'''failed for {scheduler_func} in save and reload''' ) class _A : def __init__( self : Optional[int] , __magic_name__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = fn def __call__( self : Union[str, Any] , *__magic_name__ : Union[str, Any] , **__magic_name__ : List[Any] ) -> Optional[Any]: """simple docstring""" return self.fn(*__magic_name__ , **__magic_name__ ) @classmethod def lowercase__ ( self : Any , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Dict = list(map(self , scheduler.lr_lambdas ) )
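# The schedule behind the `get_linear_schedule_with_warmup` expectations above,
# written out (an editor's sketch that reproduces the expected-LR row, not the
# library source): linear ramp over the warmup steps, then linear decay to zero.
def linear_warmup_lambda(step, num_warmup_steps=2, num_training_steps=10):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))


base_lr = 10.0
lrs = [base_lr * linear_warmup_lambda(step) for step in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]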
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : Tuple = value elif weight_type == "bias": __snake_case : str = value else: __snake_case : List[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [] __snake_case : List[Any] = fairseq_model.state_dict() __snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Any = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __snake_case : Dict = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2] __snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase ) if "weight_g" in name: __snake_case : Dict = """weight_g""" elif "weight_v" in name: __snake_case : List[str] = """weight_v""" elif "weight" in name: __snake_case : str = """weight""" elif "bias" in name: __snake_case : int = """bias""" else: __snake_case : int = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = full_name.split("""conv_layers.""" )[-1] __snake_case : Optional[int] = name.split(""".""" ) __snake_case : Dict = int(items[0] ) __snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : List[str] = SEWConfig() if is_finetuned: __snake_case : List[Any] = model.wav_encoder.wav_model.cfg else: __snake_case : Optional[Any] = model.cfg __snake_case : Tuple = fs_config.conv_bias __snake_case : List[Any] = eval(fs_config.conv_feature_layers ) __snake_case : List[Any] = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : Tuple = [x[2] for x in conv_layers] __snake_case : List[str] = """gelu""" __snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" __snake_case : Optional[int] = 0.0 __snake_case : Optional[Any] = fs_config.activation_fn.name __snake_case : Dict = fs_config.encoder_embed_dim __snake_case : Dict = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Tuple = 1E-5 __snake_case : Dict = fs_config.encoder_layerdrop __snake_case : Any = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Tuple = fs_config.conv_pos __snake_case : Optional[int] = len(_lowerCamelCase ) __snake_case : int = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : Union[str, Any] = model.cfg __snake_case : Tuple = fs_config.final_dropout __snake_case : Tuple = fs_config.layerdrop __snake_case : Any = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : List[Any] = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : str = fs_config.mask_channel_length __snake_case : Any = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : str = fs_config.mask_prob __snake_case : str = """Wav2Vec2FeatureExtractor""" __snake_case : Dict = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase ) else: __snake_case : int = convert_config(model[0] , _lowerCamelCase ) __snake_case : Dict = model[0].eval() __snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __snake_case : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , 
do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if is_finetuned: if dict_path: __snake_case : str = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Optional[Any] = target_dict.bos_index __snake_case : Tuple = target_dict.pad_index __snake_case : List[str] = target_dict.bos_index __snake_case : Optional[Any] = target_dict.eos_index __snake_case : List[str] = len(target_dict.symbols ) __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" ) if not os.path.isdir(_lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case : List[str] = SEWForCTC(_lowerCamelCase ) else: __snake_case : List[str] = SEWModel(_lowerCamelCase ) feature_extractor.save_pretrained(_lowerCamelCase ) recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __UpperCamelCase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
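# A minimal sketch of the wildcard renaming used in recursively_load_weights above:
# for each fairseq key, find the matching MAPPING entry, recover the layer index
# from the original name, and substitute it for "*" in the HF-side template. The
# sample keys and the stripped-down mapping here are illustrative, not taken from
# a real checkpoint, and the "sew." prefix handling is omitted for brevity.
SAMPLE_MAPPING = {
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
}

def rename_fairseq_key(name: str) -> str:
    for key, mapped_key in SAMPLE_MAPPING.items():
        if key in name:
            # e.g. "encoder.layers.3.self_attn.k_proj.weight" -> layer index "3"
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index) + "." + name.split(".")[-1]
    return name

assert rename_fairseq_key("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj.weight"
assert rename_fairseq_key("encoder.layers.11.fc1.bias") == "encoder.layers.11.feed_forward.intermediate_dense.bias"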
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _A ( __lowercase ): def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Dict=10_24 , __magic_name__ : List[Any]=10_24 , __magic_name__ : Optional[Any]=3.6 ) -> int: """simple docstring""" __snake_case : Optional[Any] = tokenizer __snake_case : Tuple = tokenizer.bos_token_id __snake_case : Optional[int] = dataset __snake_case : int = seq_length __snake_case : Optional[Any] = seq_length * chars_per_token * num_of_sequences def __iter__( self : Optional[int] ) -> Dict: """simple docstring""" __snake_case : int = iter(self.dataset ) __snake_case : str = True while more_examples: __snake_case , __snake_case : str = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(__magic_name__ )["""content"""] ) buffer_len += len(buffer[-1] ) except StopIteration: __snake_case : Union[str, Any] = False break __snake_case : List[Any] = tokenizer(__magic_name__ , truncation=__magic_name__ )["""input_ids"""] __snake_case : Union[str, Any] = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(__magic_name__ ) , self.seq_length ): __snake_case : Tuple = all_token_ids[i : i + self.seq_length] if len(__magic_name__ ) == self.seq_length: yield torch.tensor(__magic_name__ ) def _a ( _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = {"""streaming""": True} __snake_case : int = load_dataset(args.dataset_name , split="""train""" , **_lowerCamelCase ) __snake_case : Any = ConstantLengthDataset(_lowerCamelCase , _lowerCamelCase , seq_length=args.seq_length ) __snake_case : int = DataLoader(_lowerCamelCase , batch_size=args.batch_size ) return eval_dataloader def _a ( _lowerCamelCase ) -> Any: """simple docstring""" model.eval() __snake_case : Dict = [] for step, batch in enumerate(_lowerCamelCase ): with torch.no_grad(): __snake_case : Union[str, Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) __snake_case : Tuple = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(_lowerCamelCase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __snake_case : Tuple = torch.mean(torch.cat(_lowerCamelCase ) ) try: __snake_case : Optional[Any] = torch.exp(_lowerCamelCase ) except OverflowError: __snake_case : Dict = float("""inf""" ) return loss.item(), perplexity.item() # Setup Accelerator __UpperCamelCase = Accelerator() # Parse configuration __UpperCamelCase = HfArgumentParser(EvaluationArguments) __UpperCamelCase = parser.parse_args() set_seed(args.seed) # Logging __UpperCamelCase = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) # Load model and tokenizer __UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt) __UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader __UpperCamelCase = create_dataloader(args) # Prepare everything with our `accelerator`. 
__UpperCamelCase , __UpperCamelCase = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("Evaluating and saving model after training") __UpperCamelCase , __UpperCamelCase = evaluate(args) logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
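# A minimal sketch of the perplexity computation in the eval loop above: gather
# the per-batch losses, average them, and report exp(mean loss). The loss values
# here are made up for illustration.
import torch

losses = [torch.tensor([2.1, 2.3]), torch.tensor([2.0])]  # one tensor per eval step
mean_loss = torch.mean(torch.cat(losses))                  # 2.1333...
perplexity = torch.exp(mean_loss).item()                   # ~8.44
print(mean_loss.item(), perplexity)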
'''simple docstring''' def _a ( _lowerCamelCase ) -> bool: """simple docstring""" __snake_case : Optional[int] = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _a ( _lowerCamelCase = 5000 ) -> int: """simple docstring""" __snake_case : int = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )] for i, pentagonal_i in enumerate(_lowerCamelCase ): for j in range(_lowerCamelCase , len(_lowerCamelCase ) ): __snake_case : Optional[int] = pentagonal_nums[j] __snake_case : str = pentagonal_i + pentagonal_j __snake_case : List[Any] = pentagonal_j - pentagonal_i if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ): return b return -1 if __name__ == "__main__": print(f"""{solution() = }""")
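# A quick check of the inverse formula used by the pentagonality test above: a
# number x is pentagonal iff n = (1 + sqrt(1 + 24*x)) / 6 is a positive integer,
# since P_n = n*(3n - 1)/2 solves to exactly that n via the quadratic formula.
def pentagonal(n: int) -> int:
    return n * (3 * n - 1) // 2

for n in range(1, 6):
    x = pentagonal(n)  # 1, 5, 12, 22, 35
    root = (1 + 24 * x) ** 0.5
    assert ((1 + root) / 6) % 1 == 0, x
assert ((1 + (1 + 24 * 23) ** 0.5) / 6) % 1 != 0  # 23 is not pentagonal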
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "sentencepiece.model"} __UpperCamelCase = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, } __UpperCamelCase = { "google/rembert": 256, } class _A ( __lowercase ): lowercase__: Optional[Any] = VOCAB_FILES_NAMES lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Any , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any]=False , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=True , __magic_name__ : Tuple="[CLS]" , __magic_name__ : Optional[Any]="[SEP]" , __magic_name__ : int="[UNK]" , __magic_name__ : int="[SEP]" , __magic_name__ : Dict="[PAD]" , __magic_name__ : Tuple="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , **__magic_name__ : List[str] , ) -> Optional[int]: """simple docstring""" super().__init__( do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , **__magic_name__ , ) __snake_case : Union[str, Any] = do_lower_case __snake_case : List[Any] = remove_space __snake_case : Dict = keep_accents __snake_case : Any = vocab_file __snake_case : Dict = spm.SentencePieceProcessor() self.sp_model.Load(__magic_name__ ) @property def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" return len(self.sp_model ) def lowercase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : List[str] = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ) -> str: """simple docstring""" __snake_case : Dict = self.__dict__.copy() __snake_case : Union[str, Any] = None return state def __setstate__( self : int , __magic_name__ : Optional[int] ) -> Dict: """simple docstring""" __snake_case : List[str] = d __snake_case : Optional[Any] = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def lowercase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any]=False ) -> List[Any]: """simple docstring""" __snake_case : Any = self.sp_model.EncodeAsPieces(__magic_name__ ) return pieces def lowercase__ ( self : List[Any] , __magic_name__ : Any ) -> str: """simple docstring""" return self.sp_model.PieceToId(__magic_name__ ) def lowercase__ ( self : Optional[int] , __magic_name__ : int ) -> Union[str, Any]: """simple docstring""" return self.sp_model.IdToPiece(__magic_name__ ) def lowercase__ ( self : str , __magic_name__ : Optional[Any] ) -> Any: """simple docstring""" __snake_case : Optional[Any] = self.sp_model.decode_pieces(__magic_name__ ) return out_string def lowercase__ ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Any = [self.sep_token_id] __snake_case : str = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase__ ( self : 
List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] def lowercase__ ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Any = [self.sep_token_id] __snake_case : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error("""Vocabulary path ({}) should be a directory""".format(__magic_name__ ) ) return __snake_case : Tuple = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ): copyfile(self.vocab_file , __magic_name__ ) return (out_vocab_file,)
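# A minimal sketch of the special-token layout produced by the tokenizer methods
# above: single sequences become [CLS] A [SEP], pairs become [CLS] A [SEP] B [SEP],
# and the token type ids cover the first segment (including both specials) with 0
# and the second segment (with its trailing [SEP]) with 1. The ids below are
# placeholders, not real vocabulary indices.
CLS, SEP = 101, 102

def with_special_tokens(a, b=None):
    return [CLS] + a + [SEP] + (b + [SEP] if b is not None else [])

def segment_ids(a, b=None):
    ids = [0] * (len(a) + 2)
    return ids if b is None else ids + [1] * (len(b) + 1)

pair = with_special_tokens([7, 8], [9])
assert pair == [CLS, 7, 8, SEP, 9, SEP]
assert len(segment_ids([7, 8], [9])) == len(pair)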
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : List[Any] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __snake_case : int = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __snake_case : Optional[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above __snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __snake_case : Dict = output[output != -float("""inf""" )] __snake_case : Optional[Any] = tf.cast( tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @require_tf class _A ( unittest.TestCase , __lowercase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): lowercase__: Tuple = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" __snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Optional[int] = 2 __snake_case : str = 2 class _A ( tf.Module ): def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Dict = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict: """simple docstring""" __snake_case : Tuple = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : int = [[2, 0], [1_02, 1_03]] __snake_case : Tuple = [[1, 0], [1, 1]] __snake_case : Union[str, Any] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for batch_size in range(1 , len(__magic_name__ ) + 1 ): __snake_case : Union[str, Any] = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } __snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""] __snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case 
: Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Dict = 1 __snake_case : int = 2 class _A ( tf.Module ): def __init__( self : Tuple , __magic_name__ : List[str] ) -> int: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : Union[str, Any] = [[2], [1_02, 1_03]] __snake_case : Tuple = [[1], [1, 1]] __snake_case : List[str] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for input_row in range(len(__magic_name__ ) ): __snake_case : Tuple = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } __snake_case : str = serving_func(**__magic_name__ )["""sequences"""] __snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow @require_tensorflow_text def lowercase__ ( self : Dict ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ ) class _A ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ) -> int: """simple docstring""" super().__init__() __snake_case : Any = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() ) __snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ ) __snake_case , __snake_case : List[Any] = text.pad_model_inputs( __magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ ) return self.tokenizer.detokenize(__magic_name__ ) __snake_case : int = CompleteSentenceTransformer() __snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" ) __snake_case : Tuple = complete_model(__magic_name__ ) __snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ ) keras_model.save(__magic_name__ ) def lowercase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __snake_case : Dict = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } __snake_case : str = 14 __snake_case : str = 
AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : int = """Hello, my dog is cute and""" __snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" ) __snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : List[Any] = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) __snake_case : Dict = [6_38, 1_98] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : str = """Hugging Face is a technology company based in New York and Paris.""" __snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids __snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : int = bart_model.generate(__magic_name__ ).numpy() class _A ( __lowercase ): def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) ) class _A ( bart_model.model.encoder.__class__ ): def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) __snake_case : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __snake_case : Dict = bart_model.generate(__magic_name__ ).numpy() with self.assertRaises(__magic_name__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(__magic_name__ , foo="""bar""" )
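# A rough numpy sketch of the top-k / top-p filtering exercised in the first test
# above: keep the k highest logits, then drop the tail whose cumulative softmax
# mass exceeds top_p, masking everything else to -inf. This is a simplified
# illustrative reimplementation, not the library's tf_top_k_top_p_filtering, and
# the sample logits are made up.
import numpy as np

def top_k_top_p_filter(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    out = logits.copy()
    # top-k: mask everything strictly below the k-th largest logit
    kth = np.sort(out)[-top_k]
    out[out < kth] = -np.inf
    # top-p: sort descending, keep the prefix whose cumulative mass stays <= top_p
    order = np.argsort(out)[::-1]
    probs = np.exp(out[order] - np.max(out))
    probs /= probs.sum()
    keep = np.cumsum(probs) <= top_p
    keep[0] = True  # always keep the single most likely token
    out[order[~keep]] = -np.inf
    return out

filtered = top_k_top_p_filter(np.array([9.0, 1.0, 8.0, 7.5, 0.5]), top_k=4, top_p=0.9)
print(filtered)  # tail tokens are -inf, the two head tokens survive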
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
'''simple docstring''' from __future__ import annotations def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> None: """simple docstring""" __snake_case : int = len(_lowerCamelCase ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(_lowerCamelCase ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , ) def _a ( _lowerCamelCase ) -> None: """simple docstring""" __snake_case : list[list[str]] = [] depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase ) # Print all the boards for board in boards: for column in board: print(_lowerCamelCase ) print("""""" ) print(len(_lowerCamelCase ) , """solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
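# A small check of the two diagonal invariants the search above relies on:
# queens at (row1, col1) and (row2, col2) share a 135-degree diagonal exactly
# when row - col is equal, and a 45-degree diagonal exactly when row + col is
# equal, so tracking those two values is enough to rule out diagonal attacks.
def same_diagonal(r1, c1, r2, c2):
    return r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2

assert same_diagonal(0, 0, 3, 3)      # both on the main diagonal (row - col = 0)
assert same_diagonal(0, 3, 3, 0)      # both on the anti-diagonal (row + col = 3)
assert not same_diagonal(0, 1, 2, 0)  # no shared diagonal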
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __UpperCamelCase = "true" def _a ( _lowerCamelCase , _lowerCamelCase=82 , _lowerCamelCase=16 ) -> str: """simple docstring""" set_seed(42 ) __snake_case : int = RegressionModel() __snake_case : Any = deepcopy(_lowerCamelCase ) __snake_case : List[str] = RegressionDataset(length=_lowerCamelCase ) __snake_case : Any = DataLoader(_lowerCamelCase , batch_size=_lowerCamelCase ) model.to(accelerator.device ) __snake_case , __snake_case : List[Any] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase ) return model, ddp_model, dataloader def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" ) __snake_case : List[Any] = load_dataset("""glue""" , """mrpc""" , split="""validation""" ) def tokenize_function(_lowerCamelCase ): __snake_case : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowerCamelCase , max_length=_lowerCamelCase ) return outputs with accelerator.main_process_first(): __snake_case : Union[str, Any] = dataset.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) __snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_lowerCamelCase ): if use_longest: return tokenizer.pad(_lowerCamelCase , padding="""longest""" , return_tensors="""pt""" ) return tokenizer.pad(_lowerCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return DataLoader(_lowerCamelCase , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=16 ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = Accelerator(dispatch_batches=_lowerCamelCase , split_batches=_lowerCamelCase ) __snake_case : List[str] = get_dataloader(_lowerCamelCase , not dispatch_batches ) __snake_case : int = AutoModelForSequenceClassification.from_pretrained( """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_lowerCamelCase ) __snake_case , __snake_case : Any = accelerator.prepare(_lowerCamelCase , _lowerCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Tuple = [] for batch in dataloader: __snake_case , __snake_case : Dict = batch.values() with torch.no_grad(): __snake_case : Optional[Any] = model(_lowerCamelCase ) __snake_case , __snake_case : int = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __snake_case , __snake_case : str = [], [] for logit, targ in logits_and_targets: logits.append(_lowerCamelCase ) targs.append(_lowerCamelCase ) __snake_case , __snake_case : Optional[Any] = torch.cat(_lowerCamelCase ), torch.cat(_lowerCamelCase ) return logits, targs def _a ( _lowerCamelCase , _lowerCamelCase=82 , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=16 ) -> Union[str, Any]: """simple 
docstring""" __snake_case , __snake_case , __snake_case : Optional[int] = get_basic_setup(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) __snake_case , __snake_case : Optional[Any] = generate_predictions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) assert ( len(_lowerCamelCase ) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCamelCase )}''' def _a ( _lowerCamelCase = False , _lowerCamelCase = False ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) __snake_case , __snake_case : Dict = get_mrpc_setup(_lowerCamelCase , _lowerCamelCase ) # First do baseline __snake_case , __snake_case , __snake_case : Optional[Any] = setup["""no"""] model.to(_lowerCamelCase ) model.eval() for batch in dataloader: batch.to(_lowerCamelCase ) with torch.inference_mode(): __snake_case : List[Any] = model(**_lowerCamelCase ) __snake_case : Tuple = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=_lowerCamelCase , references=batch["""labels"""] ) __snake_case : Optional[int] = metric.compute() # Then do distributed __snake_case , __snake_case , __snake_case : Optional[int] = setup["""ddp"""] model.eval() for batch in dataloader: with torch.inference_mode(): __snake_case : Union[str, Any] = model(**_lowerCamelCase ) __snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 ) __snake_case : List[str] = batch["""labels"""] __snake_case , __snake_case : Optional[Any] = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=_lowerCamelCase , references=_lowerCamelCase ) __snake_case : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def _a ( ) -> str: """simple docstring""" __snake_case : str = Accelerator(split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("""**Testing gather_for_metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(_lowerCamelCase , _lowerCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test torch metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __snake_case : Tuple = Accelerator(split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase ) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(_lowerCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test last batch is not dropped when perfectly divisible**""" ) __snake_case : Optional[Any] = Accelerator() test_torch_metrics(_lowerCamelCase , 512 ) accelerator.state._reset_state() def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" main() if 
__name__ == "__main__": main()
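# A toy illustration of the problem gather_for_metrics (tested above) corrects:
# with 2 processes and an uneven dataset, the distributed sampler pads the last
# batch by wrapping around to the start, so a plain gather would score some
# samples twice. The interleaving below is a simplification of real shard
# ordering; real code would call accelerator.gather_for_metrics instead.
num_processes, num_samples = 2, 5
padded = list(range(num_samples)) + [0]  # sampler wraps sample 0 to even out shards
shards = [padded[i::num_processes] for i in range(num_processes)]
gathered = [x for pair in zip(*shards) for x in pair]
assert len(gathered) == 6                          # plain gather: duplicate included
assert gathered[:num_samples] == [0, 1, 2, 3, 4]   # truncation recovers the dataset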
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCamelCase = logging.getLogger(__name__) class _A ( __lowercase ): def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" super().__init__( __magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , ) __snake_case : List[str] = None def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]: """simple docstring""" logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __snake_case : List[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __snake_case : List[str] = str(distributed_port + 1 ) __snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowercase__ ( self : int ) -> int: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ ) dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group ) return target_tensor def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : int = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ ) return ifname def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): __snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ ) # distributed training __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group ) # gather logic __snake_case : Tuple = None if self._is_main(): __snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )] dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group ) # scatter logic __snake_case : Optional[int] = question_hidden_states.shape[0] __snake_case : Optional[Any] = [] __snake_case : Any = [] if self._is_main(): assert len(__magic_name__ ) == world_size __snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ ) __snake_case , __snake_case : Tuple = 
torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa ) __snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
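# A minimal sketch of the gather/scatter bookkeeping done by the retriever
# above: the main process receives one query batch per worker, retrieves for
# the concatenation, then splits the results back into equal per-worker chunks
# before scattering them. The chunking helper and sample doc ids below are
# illustrative stand-ins for _chunk_tensor and real retrieval output.
def chunk(items, num_workers):
    per_worker = len(items) // num_workers
    return [items[i * per_worker : (i + 1) * per_worker] for i in range(num_workers)]

all_doc_ids = [[11, 12], [21, 22], [31, 32], [41, 42]]  # retrieved for 4 queries
assert chunk(all_doc_ids, 2) == [[[11, 12], [21, 22]], [[31, 32], [41, 42]]]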
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _A ( __lowercase ): def __init__( self : List[str] , __magic_name__ : int , __magic_name__ : Optional[Any]=13 , __magic_name__ : str=7 , __magic_name__ : List[str]=True , __magic_name__ : int=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=True , __magic_name__ : Optional[int]=False , __magic_name__ : str=False , __magic_name__ : Tuple=False , __magic_name__ : int=2 , __magic_name__ : int=99 , __magic_name__ : Dict=0 , __magic_name__ : str=32 , __magic_name__ : Union[str, Any]=5 , __magic_name__ : Dict=4 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Any=5_12 , __magic_name__ : List[str]=12 , __magic_name__ : Tuple=2 , __magic_name__ : List[str]=0.02 , __magic_name__ : int=3 , __magic_name__ : str=4 , __magic_name__ : Any="last" , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=None , ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = parent __snake_case : Tuple = batch_size __snake_case : Any = seq_length __snake_case : List[Any] = is_training __snake_case : Dict = use_input_lengths __snake_case : Tuple = use_token_type_ids __snake_case : List[str] = use_labels __snake_case : Tuple = gelu_activation __snake_case : Optional[Any] = sinusoidal_embeddings __snake_case : int = causal __snake_case : Dict = asm __snake_case : Any = n_langs __snake_case : Optional[int] = vocab_size __snake_case : Dict = n_special __snake_case : Dict = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : List[Any] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : Dict = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : List[str] = initializer_range __snake_case : int = num_labels __snake_case : List[str] = num_choices __snake_case : int = summary_type __snake_case : List[Any] = use_proj __snake_case : Union[str, Any] = scope def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Optional[int] = None if self.use_input_lengths: __snake_case : List[str] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __snake_case : str = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __snake_case : Optional[int] = None __snake_case : List[Any] = None __snake_case : int = None if self.use_labels: 
__snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[str] = ids_tensor([self.batch_size] , 2 ).float() __snake_case : Dict = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : int = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def lowercase__ ( self : Dict , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , ) -> Optional[int]: """simple docstring""" __snake_case : Dict = FlaubertModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ , lengths=__magic_name__ , langs=__magic_name__ ) __snake_case : Any = model(__magic_name__ , langs=__magic_name__ ) __snake_case : List[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : List[Any] , ) -> int: """simple docstring""" __snake_case : List[Any] = FlaubertWithLMHeadModel(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[Any] = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Optional[int] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Any , ) -> Tuple: """simple docstring""" __snake_case : List[Any] = FlaubertForQuestionAnsweringSimple(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Dict = model(__magic_name__ ) __snake_case : Dict = model(__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Union[str, 
Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , ) -> int: """simple docstring""" __snake_case : List[str] = FlaubertForQuestionAnswering(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) __snake_case : Optional[Any] = model( __magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , cls_index=__magic_name__ , is_impossible=__magic_name__ , p_mask=__magic_name__ , ) __snake_case : int = model( __magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , cls_index=__magic_name__ , is_impossible=__magic_name__ , ) ((__snake_case) , ) : int = result_with_labels.to_tuple() __snake_case : int = model(__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ ) ((__snake_case) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowercase__ ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : str , ) -> str: """simple docstring""" __snake_case : Any = FlaubertForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[str] = model(__magic_name__ ) __snake_case : List[str] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : int , ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.num_labels __snake_case : List[str] = FlaubertForTokenClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : Union[str, Any] , ) -> Tuple: """simple docstring""" __snake_case : Dict = self.num_choices __snake_case : int = FlaubertForMultipleChoice(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : int = 
input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[int] = model( __magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Optional[int] = config_and_inputs __snake_case : Optional[Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: int = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowercase__: List[str] = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> List[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Tuple=False ) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) __snake_case : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : int = FlaubertModelTester(self ) __snake_case : Dict = ConfigTester(self , config_class=__magic_name__ , emb_dim=37 ) def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : int ) -> Any: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__magic_name__ ) def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__magic_name__ ) def lowercase__ ( self : str ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__magic_name__ ) def lowercase__ ( self : Dict ) -> str: """simple docstring""" __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__magic_name__ ) def lowercase__ ( self : str ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__magic_name__ ) @slow def lowercase__ ( self : str ) -> Any: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Union[str, Any] = FlaubertModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @slow @require_torch_gpu def lowercase__ ( self : int ) -> int: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return __snake_case : Any = True __snake_case : List[Any] = model_class(config=__magic_name__ ) __snake_case : List[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ ) __snake_case : List[str] = torch.jit.trace( __magic_name__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__magic_name__ , os.path.join(__magic_name__ , """traced_model.pt""" ) ) __snake_case : Optional[int] = torch.jit.load(os.path.join(__magic_name__ , """traced_model.pt""" ) , map_location=__magic_name__ ) loaded(inputs_dict["""input_ids"""].to(__magic_name__ ) , inputs_dict["""attention_mask"""].to(__magic_name__ ) ) @require_torch class _A ( unittest.TestCase ): @slow def lowercase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __snake_case : str = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) __snake_case : Tuple = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) with torch.no_grad(): __snake_case : Optional[Any] = model(__magic_name__ )[0] __snake_case : int = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) __snake_case : Optional[Any] = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
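# A minimal sketch of the TorchScript round trip exercised at the end of the
# test class above: trace a module with example inputs, save the trace, reload
# it, and call the loaded artifact. The tiny module here is a stand-in for the
# Flaubert models under test.
import os
import tempfile

import torch

class TinyModel(torch.nn.Module):
    def forward(self, input_ids, attention_mask):
        return (input_ids * attention_mask).sum(dim=-1)

model = TinyModel()
example = (torch.ones(2, 4, dtype=torch.long), torch.ones(2, 4, dtype=torch.long))
traced = torch.jit.trace(model, example)
with tempfile.TemporaryDirectory() as tmp:
    torch.jit.save(traced, os.path.join(tmp, "traced_model.pt"))
    loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"))
print(loaded(*example))  # tensor([4, 4])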
13
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self) -> None:
        """simple docstring"""
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        """simple docstring"""
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        """simple docstring"""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        """simple docstring"""
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        """simple docstring"""
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        """simple docstring"""
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        """simple docstring"""
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """simple docstring"""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        """simple docstring"""
        return self.version_str


def _str_to_version_tuple(version_str):
    """simple docstring"""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """simple docstring"""
    return ".".join(str(v) for v in version_tuple)
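A short usage sketch of the Version class above (the version numbers are illustrative):

# Illustrative values only.
v_old = Version("1.0.0")
v_new = Version("2.1.0")

assert v_old < v_new             # @total_ordering derives the rest from __lt__ and __eq__
assert v_old == "1.0.0"          # strings are coerced through _validate_operand
assert v_new.tuple == (2, 1, 0)  # parsed (major, minor, patch)
assert repr(v_new) == "2.1.0"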
13
1
'''simple docstring'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """simple docstring"""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
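As a quick sanity check of the demo call above: at redshift 0 with these densities the bracket collapses to exactly 1, so the function returns the Hubble constant itself.

# curvature = 1 - (0.3 + 1e-4 + 0.7) = -1e-4
# E^2(0)    = 1e-4 + 0.3 - 1e-4 + 0.7 = 1.0, hence H(0) = H0 = 68.3
h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=0.7,
    redshift=0,
)
assert abs(h0 - 68.3) < 1e-9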
13
'''simple docstring'''


def bin_to_octal(bin_string: str) -> str:
    """simple docstring"""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
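A couple of illustrative calls, cross-checked against Python's built-in conversion:

assert bin_to_octal("101010") == "52"  # 0b101010 == 42 == 0o52
assert bin_to_octal("1111") == "17"    # padded to "001111" -> 001 111 -> 1 7
assert bin_to_octal("101010") == oct(int("101010", 2))[2:]  # built-in cross-check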
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCamelCase = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinForImageClassification", "SwinForMaskedImageModeling", "SwinModel", "SwinPreTrainedModel", "SwinBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
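The _LazyModule indirection above defers the heavy framework imports until an exported name is first touched. A minimal sketch of the idea, assuming a simplified structure (this is not the actual transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    # Simplified sketch: resolve exported names on first access instead of at import time.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr in self._name_to_module:
            submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
            value = getattr(submodule, attr)
            setattr(self, attr, value)  # cache so later lookups skip __getattr__
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")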
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __UpperCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __UpperCamelCase = TaTokenizerFast __UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __UpperCamelCase = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
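The repeated try/except OptionalDependencyNotAvailable blocks gate each framework behind an availability probe. A minimal sketch of such a probe (an assumed simplification, not the transformers implementation):

import importlib.util


def is_backend_available(backend: str) -> bool:
    # True when the package can be imported, without paying the import cost yet.
    return importlib.util.find_spec(backend) is not None


if is_backend_available("torch"):
    import torch  # heavy import happens only when the backend is installed
else:
    torch = None  # callers would fall back to dummy placeholder objects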
13
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL __UpperCamelCase = logging.get_logger(__name__) def _a ( _lowerCamelCase ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(_lowerCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_lowerCamelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_lowerCamelCase ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class _A ( __lowercase ): lowercase__: int = ['''pixel_values'''] def __init__( self : Dict , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_55 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : int , ) -> None: """simple docstring""" super().__init__(**__magic_name__ ) __snake_case : Any = size if size is not None else {"""shortest_edge""": 2_56} __snake_case : List[Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) __snake_case : Any = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} __snake_case : str = get_size_dict(__magic_name__ , param_name="""crop_size""" ) __snake_case : Tuple = do_resize __snake_case : Union[str, Any] = size __snake_case : List[Any] = do_center_crop __snake_case : Tuple = crop_size __snake_case : Any = resample __snake_case : Any = do_rescale __snake_case : Optional[Any] = rescale_factor __snake_case : List[str] = offset __snake_case : Dict = do_normalize __snake_case : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Tuple , ) -> np.ndarray: """simple docstring""" __snake_case : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) if "shortest_edge" in size: __snake_case : List[str] = get_resize_output_image_size(__magic_name__ , size["""shortest_edge"""] , default_to_square=__magic_name__ ) elif "height" in size and "width" in size: __snake_case : Optional[int] = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[Any] , ) -> np.ndarray: """simple docstring""" __snake_case : Tuple = get_size_dict(__magic_name__ ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = image.astype(np.floataa ) if offset: __snake_case : Dict = image - (scale / 2) return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : List[str] , ) -> np.ndarray: """simple docstring""" return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowercase__ ( self : List[str] , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
__snake_case : Dict = to_numpy_array(__magic_name__ ) if do_resize: __snake_case : List[Any] = self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) if do_center_crop: __snake_case : List[Any] = self.center_crop(__magic_name__ , size=__magic_name__ ) if do_rescale: __snake_case : Any = self.rescale(image=__magic_name__ , scale=__magic_name__ , offset=__magic_name__ ) if do_normalize: __snake_case : Union[str, Any] = self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) __snake_case : str = to_channel_dimension_format(__magic_name__ , __magic_name__ ) return image def lowercase__ ( self : List[str] , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Optional[int] = resample if resample is not None else self.resample __snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case : List[str] = do_rescale if do_rescale is not None else self.do_rescale __snake_case : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Optional[int] = offset if offset is not None else self.offset __snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Tuple = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : Union[str, Any] = size if size is not None else self.size __snake_case : List[str] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) __snake_case : Dict = crop_size if crop_size is not None else self.crop_size __snake_case : Tuple = get_size_dict(__magic_name__ , param_name="""crop_size""" ) if not valid_images(__magic_name__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) __snake_case : Any = make_batched(__magic_name__ ) __snake_case : str = [ [ self._preprocess_image( image=__magic_name__ , do_resize=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , do_center_crop=__magic_name__ , crop_size=__magic_name__ , do_rescale=__magic_name__ , rescale_factor=__magic_name__ , offset=__magic_name__ , do_normalize=__magic_name__ , image_mean=__magic_name__ , image_std=__magic_name__ , data_format=__magic_name__ , ) for img in video ] for video in videos ] __snake_case : str = {"""pixel_values""": videos} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
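The offset branch of the rescale step above shifts values after scaling so the output range is centred on zero. As a generic illustration with assumed constants (a scale of 1/127.5 mapping uint8 pixels to [-1, 1], not necessarily the exact constants of this class):

import numpy as np

pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)

# Scale, then offset: pixel * (1 / 127.5) - 1 maps [0, 255] onto [-1, 1].
scaled = pixels * (1.0 / 127.5) - 1.0

assert np.allclose(scaled, [-1.0, 0.0, 1.0])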
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __snake_case : Tuple = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""] __snake_case : Any = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. __snake_case : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
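The integration test above pins a 3x3 slice of the hidden states against hard-coded reference values; the comparison has to be tolerance-based rather than exact. A small sketch of that check (the values are illustrative):

import numpy as np

output = np.array([-0.0254, 0.0235, 0.1027])             # observed slice (illustrative)
expected_slice = np.array([-0.02541, 0.02352, 0.10269])  # pinned reference values

assert not np.array_equal(output, expected_slice)      # exact match is too strict for floats
assert np.allclose(output, expected_slice, atol=1e-4)  # tolerance absorbs small numeric drift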
13
1
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class _A : def __init__( self : Dict , __magic_name__ : str , __magic_name__ : List[str]=3 , __magic_name__ : int=32 , __magic_name__ : List[Any]=3 , __magic_name__ : List[Any]=10 , __magic_name__ : int=[8, 16, 32, 64] , __magic_name__ : Tuple=[1, 1, 2, 1] , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]="relu" , __magic_name__ : str=3 , __magic_name__ : Any=None , __magic_name__ : Dict=["stage2", "stage3", "stage4"] , __magic_name__ : Any=[2, 3, 4] , __magic_name__ : Optional[Any]=1 , ) -> List[Any]: """simple docstring""" __snake_case : Tuple = parent __snake_case : Any = batch_size __snake_case : int = image_size __snake_case : Optional[int] = num_channels __snake_case : Tuple = embeddings_size __snake_case : Tuple = hidden_sizes __snake_case : int = depths __snake_case : str = is_training __snake_case : List[Any] = use_labels __snake_case : str = hidden_act __snake_case : Optional[int] = num_labels __snake_case : List[str] = scope __snake_case : Optional[int] = len(__magic_name__ ) __snake_case : Tuple = out_features __snake_case : str = out_indices __snake_case : int = num_groups def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" __snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowercase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : List[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : str = BitModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[int] = self.num_labels __snake_case : str = BitForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ , 
labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : str , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : List[str] = BitBackbone(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[Any] = model(__magic_name__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __snake_case : int = None __snake_case : str = BitBackbone(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[str] = model(__magic_name__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowercase__ ( self : Optional[Any] ) -> str: """simple docstring""" __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Optional[int] = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: List[Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowercase__: List[str] = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) lowercase__: Optional[int] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: Any = False lowercase__: Dict = False def lowercase__ ( self : Any ) -> Dict: """simple docstring""" __snake_case : int = BitModelTester(self ) __snake_case : Any = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" return @unittest.skip(reason="""Bit does not output attentions""" ) def lowercase__ ( self : Dict ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
__snake_case : Any = model_class(__magic_name__ ) __snake_case : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[Any] = [*signature.parameters.keys()] __snake_case : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : str ) -> Any: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(config=__magic_name__ ) for name, module in model.named_modules(): if isinstance(__magic_name__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def lowercase__ ( self : Any ) -> List[Any]: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ): __snake_case : List[str] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[str] = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: __snake_case : Any = layer_type __snake_case : Union[str, Any] = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" pass def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[int] = BitModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Dict: """simple docstring""" __snake_case : Any = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Tuple ) -> List[Any]: """simple docstring""" __snake_case : List[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ ) __snake_case : Optional[int] = self.default_image_processor __snake_case : Any = prepare_img() __snake_case : Any = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : int = model(**__magic_name__ ) # verify the logits __snake_case : Union[str, Any] = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : str = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) ) @require_torch class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = (BitBackbone,) if is_torch_available() else () lowercase__: Optional[Any] = BitConfig lowercase__: Tuple = False def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Optional[Any] = BitModelTester(self )
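A minimal sketch of the normalisation-layer initialisation check performed above, run on a toy module (the module itself is an illustrative assumption):

import torch
import torch.nn as nn

toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.GroupNorm(2, 8))

for name, module in toy.named_modules():
    if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
        # Freshly initialised norm layers should have unit weight and zero bias.
        assert torch.all(module.weight == 1), f"{name} weight not ones"
        assert torch.all(module.bias == 0), f"{name} bias not zeros"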
13
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
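The nested loop in the model tester above legalises randomly drawn boxes so that x0 <= x1 and y0 <= y1. The same normalisation can be expressed vectorised; a small numpy sketch (shapes and seed are illustrative):

import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(2, 7, 4))  # (batch, seq, [x0, y0, x1, y1])

# Sort each coordinate pair so the smaller value always comes first.
x = np.sort(bbox[..., [0, 2]], axis=-1)
y = np.sort(bbox[..., [1, 3]], axis=-1)
legal = np.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], axis=-1)

assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()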
13
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class _A ( metaclass=__lowercase ): lowercase__: Tuple = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : str , *__magic_name__ : Union[str, Any] , **__magic_name__ : Optional[int] ) -> Optional[Any]: """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : Optional[Any] , *__magic_name__ : Union[str, Any] , **__magic_name__ : int ) -> str: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : List[Any] , *__magic_name__ : Dict , **__magic_name__ : Dict ) -> List[str]: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=__lowercase ): lowercase__: Tuple = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : Tuple , *__magic_name__ : str , **__magic_name__ : Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : Optional[int] , *__magic_name__ : Optional[int] , **__magic_name__ : int ) -> List[str]: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=__lowercase ): lowercase__: Union[str, Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : int , *__magic_name__ : int , **__magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : int , *__magic_name__ : str , **__magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : int , *__magic_name__ : Dict , **__magic_name__ : Any ) -> str: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=__lowercase ): lowercase__: Tuple = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : List[Any] , *__magic_name__ : List[str] , **__magic_name__ : Union[str, Any] ) -> int: """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : Dict , *__magic_name__ : List[str] , **__magic_name__ : Dict ) -> List[str]: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : Optional[Any] , *__magic_name__ : Optional[int] , **__magic_name__ : List[str] ) -> Optional[int]: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=__lowercase ): lowercase__: Optional[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Optional[int] , *__magic_name__ : str , **__magic_name__ : Dict ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : str , *__magic_name__ : Any , **__magic_name__ : List[Any] ) -> Dict: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( 
cls : Any , *__magic_name__ : Tuple , **__magic_name__ : str ) -> str: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=__lowercase ): lowercase__: Dict = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : Any , *__magic_name__ : Tuple , **__magic_name__ : Any ) -> Any: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def lowercase__ ( cls : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ) -> Optional[int]: """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
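All of these classes follow the dummy-object pattern: a metaclass turns any use of the class into a clear missing-dependency error. A simplified sketch of that mechanism (the class name and error message are assumptions):

class DummyObject(type):
    # Metaclass sketch: touching any public attribute of the class raises a helpful error.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class ORTModel(metaclass=DummyObject):  # hypothetical dummy class
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)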
13
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
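The tester above derives its sequence length from the patch grid: num_patches_per_frame = (image_size // patch_size) ** 2 and seq_length = num_frames * num_patches_per_frame + 1 for the CLS token. A quick arithmetic check using the tester's own defaults (image_size=10, patch_size=2, num_frames=2):

# Sketch of the token count the tests above rely on, with the tester's default sizes.
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 5 * 5 = 25 patches per frame
seq_length = num_frames * num_patches_per_frame + 1      # 2 * 25 + 1 = 51 tokens, incl. CLS
assert (num_patches_per_frame, seq_length) == (25, 51)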
13
1
'''simple docstring''' import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home __UpperCamelCase = HUGGINGFACE_HUB_CACHE __UpperCamelCase = "config.json" __UpperCamelCase = "diffusion_pytorch_model.bin" __UpperCamelCase = "diffusion_flax_model.msgpack" __UpperCamelCase = "model.onnx" __UpperCamelCase = "diffusion_pytorch_model.safetensors" __UpperCamelCase = "weights.pb" __UpperCamelCase = "https://huggingface.co" __UpperCamelCase = default_cache_path __UpperCamelCase = "diffusers_modules" __UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) __UpperCamelCase = ["fp16", "non-ema"] __UpperCamelCase = ".self_attn"
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["ConditionalDetrFeatureExtractor"] __UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
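This __init__ only registers names in _import_structure; _LazyModule defers the heavy submodule imports until an attribute is first accessed. A minimal, self-contained sketch of that pattern (not transformers' actual _LazyModule implementation):

import importlib
import types

class LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that really defines it.
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr):
        # Import lazily on first access, then cache on the module object.
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)
        return value

lazy = LazySketch("demo", {"math": ["sqrt"], "json": ["dumps"]})
assert lazy.sqrt(9) == 3.0  # "math" is only imported here, on first use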
13
1
'''simple docstring''' import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow __UpperCamelCase = False class _A ( unittest.TestCase ): def lowercase__ ( self : int , __magic_name__ : str=32 ) -> str: """simple docstring""" set_seed(0 ) __snake_case : Union[str, Any] = UNetaDModel(sample_size=__magic_name__ , in_channels=3 , out_channels=3 ) __snake_case : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.0001 ) return model, optimizer @slow def lowercase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : Tuple = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable __snake_case : List[Any] = DDPMScheduler( num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=__magic_name__ , ) __snake_case : Any = DDIMScheduler( num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=__magic_name__ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) __snake_case : Dict = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(__magic_name__ ) for _ in range(4 )] __snake_case : Any = [torch.randn((4, 3, 32, 32) ).to(__magic_name__ ) for _ in range(4 )] __snake_case : int = [torch.randint(0 , 10_00 , (4,) ).long().to(__magic_name__ ) for _ in range(4 )] # train with a DDPM scheduler __snake_case , __snake_case : List[Any] = self.get_model_optimizer(resolution=32 ) model.train().to(__magic_name__ ) for i in range(4 ): optimizer.zero_grad() __snake_case : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) __snake_case : int = model(__magic_name__ , timesteps[i] ).sample __snake_case : Dict = torch.nn.functional.mse_loss(__magic_name__ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM __snake_case , __snake_case : str = self.get_model_optimizer(resolution=32 ) model.train().to(__magic_name__ ) for i in range(4 ): optimizer.zero_grad() __snake_case : Tuple = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) __snake_case : List[Any] = model(__magic_name__ , timesteps[i] ).sample __snake_case : List[Any] = torch.nn.functional.mse_loss(__magic_name__ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) ) self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
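The test above relies on DDPM and DDIM sharing the same forward-noising rule, so identical seeds yield identical training losses under either scheduler. A sketch of that rule in the standard DDPM formulation (an assumption about what scheduler.add_noise computes, not the diffusers source):

import torch

def add_noise_sketch(x0, noise, t, alphas_cumprod):
    # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps, with abar_t broadcast over NCHW.
    abar = alphas_cumprod[t].view(-1, 1, 1, 1)
    return abar.sqrt() * x0 + (1.0 - abar).sqrt() * noise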
13
'''simple docstring''' def _a ( _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : str = 0 __snake_case : Optional[int] = len(_lowerCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , _lowerCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _a ( _lowerCamelCase ) -> Tuple: """simple docstring""" if len(_lowerCamelCase ) <= 1: return arr, 0 __snake_case : Any = len(_lowerCamelCase ) // 2 __snake_case : List[str] = arr[0:mid] __snake_case : int = arr[mid:] __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : Tuple = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : str = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase ) __snake_case : str = inversion_p + inversions_q + cross_inversions return c, num_inversions def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Any = [] __snake_case : List[str] = 0 while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ): if p[i] > q[j]: # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(_lowerCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(_lowerCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _a ( ) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __snake_case : Optional[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , _lowerCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __snake_case : Any = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) # an empty list should also have zero inversions __snake_case : List[Any] = [] __snake_case : List[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) if __name__ == "__main__": main()
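Because the style transform renames all of this file's definitions to _a, a self-contained restatement of the brute-force counter makes the invariant testable on the example from main() above:

# Hypothetical re-implementation of the O(n^2) counter referenced as count_inversions_bf.
def count_inversions_bruteforce(arr):
    n = len(arr)
    return sum(1 for i in range(n - 1) for j in range(i + 1, n) if arr[i] > arr[j])

assert count_inversions_bruteforce([10, 2, 1, 5, 5, 2, 11]) == 8  # matches the comment above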
13
1
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[Any] = IFInpaintingPipeline lowercase__: Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} lowercase__: List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase__: Any = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowercase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" return self._get_dummy_components() def lowercase__ ( self : Any , __magic_name__ : str , __magic_name__ : str=0 ) -> Any: """simple docstring""" if str(__magic_name__ ).startswith("""mps""" ): __snake_case : Any = torch.manual_seed(__magic_name__ ) else: __snake_case : Any = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Any = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def lowercase__ ( self : int ) -> int: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowercase__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" self._test_save_load_local() def lowercase__ ( self : int ) -> int: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
13
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
13
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ) __snake_case : Optional[int] = AutoTokenizer.from_pretrained("""google/mt5-small""" ) __snake_case : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids __snake_case : Optional[int] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids __snake_case : Tuple = model(__magic_name__ , labels=__magic_name__ ).loss __snake_case : Optional[Any] = -tf.math.reduce_mean(__magic_name__ ).numpy() __snake_case : Union[str, Any] = -21.228168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
13
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
13
1
'''simple docstring''' from __future__ import annotations from math import pi def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if inductance < 0: raise ValueError("""Inductance cannot be negative""" ) if frequency < 0: raise ValueError("""Frequency cannot be negative""" ) if reactance < 0: raise ValueError("""Inductive reactance cannot be negative""" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
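The solver above inverts X_L = 2 * pi * f * L for whichever argument is zero. A standalone sketch of the forward relation (the obfuscated def is named _a here, so this reimplements it rather than calling it):

from math import isclose, pi

def inductive_reactance(inductance: float, frequency: float) -> float:
    # X_L = 2 * pi * f * L
    return 2 * pi * frequency * inductance

assert isclose(inductive_reactance(35e-3, 1e3), 70 * pi)  # 35 mH at 1 kHz -> ~219.91 ohms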
13
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
13
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class _A ( __lowercase , unittest.TestCase ): lowercase__: str = ShapEPipeline lowercase__: Union[str, Any] = ['''prompt'''] lowercase__: Dict = ['''prompt'''] lowercase__: Union[str, Any] = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] lowercase__: Union[str, Any] = False @property def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" return 32 @property def lowercase__ ( self : List[Any] ) -> Tuple: """simple docstring""" return 32 @property def lowercase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" return 8 @property def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __snake_case : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(__magic_name__ ) @property def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : Tuple = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } __snake_case : List[Any] = PriorTransformer(**__magic_name__ ) return model @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : Optional[Any] = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } __snake_case : Any = ShapERenderer(**__magic_name__ ) return model def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" __snake_case : int = self.dummy_prior __snake_case : Optional[int] = self.dummy_text_encoder __snake_case : str = self.dummy_tokenizer __snake_case : List[str] = self.dummy_renderer __snake_case : str = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=__magic_name__ , clip_sample=__magic_name__ , clip_sample_range=1.0 , ) 
__snake_case : List[Any] = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowercase__ ( self : Optional[int] , __magic_name__ : Tuple , __magic_name__ : str=0 ) -> Any: """simple docstring""" if str(__magic_name__ ).startswith("""mps""" ): __snake_case : int = torch.manual_seed(__magic_name__ ) else: __snake_case : Dict = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Any = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Optional[Any] = """cpu""" __snake_case : Optional[int] = self.get_dummy_components() __snake_case : List[Any] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : str = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : str = output.images[0] __snake_case : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __snake_case : List[Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase__ ( self : Any ) -> List[str]: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self : str ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = torch_device == """cpu""" __snake_case : List[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__magic_name__ , relax_max_difference=__magic_name__ , ) def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : str = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Any = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : Optional[int] = 1 __snake_case : Union[str, Any] = 2 __snake_case : Any = self.get_dummy_inputs(__magic_name__ ) for key in inputs.keys(): if key in self.batch_params: __snake_case : Optional[int] = batch_size * [inputs[key]] __snake_case : Tuple = pipe(**__magic_name__ , num_images_per_prompt=__magic_name__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : str ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : int ) -> Dict: """simple docstring""" __snake_case : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) __snake_case : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" ) __snake_case : Union[str, Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : Optional[Any] = torch.Generator(device=__magic_name__ ).manual_seed(0 ) __snake_case : Any = pipe( """a shark""" , generator=__magic_name__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
13
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class _A ( __lowercase ): lowercase__: str = '''codegen''' lowercase__: Optional[int] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int: """simple docstring""" __snake_case : List[str] = vocab_size __snake_case : Union[str, Any] = n_ctx __snake_case : int = n_positions __snake_case : str = n_embd __snake_case : Dict = n_layer __snake_case : List[Any] = n_head __snake_case : Any = n_inner __snake_case : str = rotary_dim __snake_case : List[str] = activation_function __snake_case : Tuple = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Tuple = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : Union[str, Any] = eos_token_id super().__init__( bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ ) class _A ( __lowercase ): def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , 
__magic_name__ : bool = False , ) -> Tuple: """simple docstring""" super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ ) if not getattr(self._config , """pad_token_id""" , __magic_name__ ): # TODO: how to do that better? __snake_case : List[str] = 0 @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return self._config.n_layer @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : Optional[int] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype __snake_case : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
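generate_dummy_inputs above zero-fills each past key/value tensor with shape (batch, n_head, past_sequence_length, n_embd // n_head), where past_sequence_length = seqlen + 2. A shape check under the defaults read off the config signature above (n_head=16, n_embd=4096; treat these as assumptions):

import torch

batch, seqlen = 2, 7
n_head, n_embd = 16, 4096
past_len = seqlen + 2  # the dummy-input builder above uses seqlen + 2
k = torch.zeros(batch, n_head, past_len, n_embd // n_head)
assert k.shape == (2, 16, 9, 256)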
13
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) class _A ( __lowercase ): lowercase__: Tuple = '''timm_backbone''' def __init__( self : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Dict=3 , __magic_name__ : List[str]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=None , **__magic_name__ : Union[str, Any] , ) -> List[Any]: """simple docstring""" super().__init__(**__magic_name__ ) __snake_case : Any = backbone __snake_case : Tuple = num_channels __snake_case : int = features_only __snake_case : int = use_pretrained_backbone __snake_case : List[str] = True __snake_case : Dict = out_indices if out_indices is not None else (-1,)
13
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyImgaImgPipeline lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] lowercase__: int = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowercase__: List[Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: Any = False @property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return 32 @property def lowercase__ ( self : str ) -> str: """simple docstring""" return 32 @property def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __snake_case : Tuple = MultilingualCLIP(__magic_name__ ) __snake_case : Optional[Any] = text_encoder.eval() return text_encoder @property def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return { "block_out_channels": [32, 64], 
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Tuple = self.dummy_text_encoder __snake_case : Dict = self.dummy_tokenizer __snake_case : Dict = self.dummy_unet __snake_case : int = self.dummy_movq __snake_case : List[Any] = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case : Dict = DDIMScheduler(**__magic_name__ ) __snake_case : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str: """simple docstring""" __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Dict = """cpu""" __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : Any = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' 
expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __snake_case : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __snake_case : List[Any] = """A red cartoon frog, 4k""" __snake_case : str = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __snake_case : Any = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case , __snake_case : Optional[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : List[str] = pipeline( __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) __snake_case : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} __UpperCamelCase = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } __UpperCamelCase = { "allenai/longformer-base-4096": 4096, "allenai/longformer-large-4096": 4096, "allenai/longformer-large-4096-finetuned-triviaqa": 4096, "allenai/longformer-base-4096-extra.pos.embd.only": 4096, "allenai/longformer-large-4096-extra.pos.embd.only": 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _a ( ) -> str: """simple docstring""" __snake_case : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) __snake_case : List[str] = bs[:] __snake_case : int = 0 for b in range(2**8 ): if b not in bs: bs.append(_lowerCamelCase ) cs.append(2**8 + n ) n += 1 __snake_case : Any = [chr(_lowerCamelCase ) for n in cs] return dict(zip(_lowerCamelCase , _lowerCamelCase ) ) def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" __snake_case : int = set() __snake_case : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __snake_case : List[str] = char return pairs class _A ( __lowercase ): lowercase__: Union[str, Any] = VOCAB_FILES_NAMES lowercase__: Dict = PRETRAINED_VOCAB_FILES_MAP lowercase__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: int = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Any="replace" , __magic_name__ : Dict="<s>" , __magic_name__ : int="</s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Optional[Any]="<s>" , __magic_name__ : int="<unk>" , __magic_name__ : Optional[Any]="<pad>" , __magic_name__ : str="<mask>" , __magic_name__ : Union[str, Any]=False , **__magic_name__ : 
Dict , ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token __snake_case : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token __snake_case : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token __snake_case : Union[str, Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token __snake_case : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __snake_case : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token super().__init__( errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , **__magic_name__ , ) with open(__magic_name__ , encoding="""utf-8""" ) as vocab_handle: __snake_case : Tuple = json.load(__magic_name__ ) __snake_case : Optional[Any] = {v: k for k, v in self.encoder.items()} __snake_case : Any = errors # how to handle errors in decoding __snake_case : List[Any] = bytes_to_unicode() __snake_case : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(__magic_name__ , encoding="""utf-8""" ) as merges_handle: __snake_case : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1] __snake_case : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges] __snake_case : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) __snake_case : Optional[Any] = {} __snake_case : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __snake_case : Any = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def lowercase__ ( self : int ) -> List[str]: """simple docstring""" return len(self.encoder ) def lowercase__ ( self : Any ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : Dict , __magic_name__ : Tuple ) -> Union[str, Any]: """simple docstring""" if token in self.cache: return self.cache[token] __snake_case : Any = tuple(__magic_name__ ) __snake_case : Tuple = get_pairs(__magic_name__ ) if not pairs: return token while True: __snake_case : Optional[Any] = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __snake_case , __snake_case : List[Any] = bigram __snake_case : str = [] __snake_case : List[Any] = 0 while i < len(__magic_name__ ): try: __snake_case : Optional[int] = word.index(__magic_name__ , __magic_name__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __snake_case : 
List[str] = j if word[i] == first and i < len(__magic_name__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __snake_case : List[Any] = tuple(__magic_name__ ) __snake_case : int = new_word if len(__magic_name__ ) == 1: break else: __snake_case : List[Any] = get_pairs(__magic_name__ ) __snake_case : Dict = """ """.join(__magic_name__ ) __snake_case : Optional[Any] = word return word def lowercase__ ( self : List[str] , __magic_name__ : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : int = [] for token in re.findall(self.pat , __magic_name__ ): __snake_case : str = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__magic_name__ ).split(""" """ ) ) return bpe_tokens def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> Dict: """simple docstring""" return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Dict , __magic_name__ : Dict ) -> str: """simple docstring""" return self.decoder.get(__magic_name__ ) def lowercase__ ( self : int , __magic_name__ : Tuple ) -> List[Any]: """simple docstring""" __snake_case : Tuple = """""".join(__magic_name__ ) __snake_case : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowercase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case : int = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __snake_case : Union[str, Any] = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + """\n""" ) __snake_case : Tuple = 0 with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) __snake_case : List[Any] = token_index writer.write(""" """.join(__magic_name__ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowercase__ ( self : Any , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Optional[Any] = [self.cls_token_id] __snake_case : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , 
already_has_special_tokens=__magic_name__ ) if token_ids_a is None: return [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] def lowercase__ ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : List[str] = [self.sep_token_id] __snake_case : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase__ ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : List[Any]=False , **__magic_name__ : List[str] ) -> Any: """simple docstring""" __snake_case : Dict = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__magic_name__ ) > 0 and not text[0].isspace()): __snake_case : Dict = """ """ + text return (text, kwargs)
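# Editor's sketch of the byte-level BPE idea implemented above (most identifiers in
# this file were machine-obfuscated; the call sites still name `bytes_to_unicode`).
# Every raw byte is first mapped to a printable unicode character, then merges are
# applied greedily by rank, so no input can ever be out-of-vocabulary:
#
#     >>> mapping = bytes_to_unicode()          # 256 entries, all printable
#     >>> len(mapping)
#     256
#     >>> "".join(mapping[b] for b in " hello".encode("utf-8"))
#     'Ġhello'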
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __UpperCamelCase = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __UpperCamelCase = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } class _A ( __lowercase ): lowercase__: Any = VOCAB_FILES_NAMES lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask'''] lowercase__: List[str] = BartTokenizer def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]: """simple docstring""" super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , 
errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , ) __snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) ) __snake_case : str = add_prefix_space __snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ ) __snake_case : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __snake_case : Any = """post_processor""" __snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) if tokenizer_component_instance: __snake_case : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __snake_case : Tuple = tuple(state["""sep"""] ) if "cls" in state: __snake_case : int = tuple(state["""cls"""] ) __snake_case : Optional[int] = False if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : Optional[Any] = add_prefix_space __snake_case : List[str] = True if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets: __snake_case : Optional[int] = trim_offsets __snake_case : Any = True if changes_to_apply: __snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) ) __snake_case : List[Any] = component_class(**__magic_name__ ) setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) @property def lowercase__ ( self : List[Any] ) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value __snake_case : Union[str, Any] = value def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding: """simple docstring""" __snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding: """simple docstring""" __snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) 
return tuple(__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Optional[int] = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
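# Usage sketch (editor's example; the checkpoint name is real, the input sentence is
# illustrative). Per `build_inputs_with_special_tokens` above, BART formats a single
# sequence as <s> A </s> and a pair as <s> A </s></s> B </s>, with all-zero
# token_type_ids in both cases:
#
#     >>> from transformers import BartTokenizerFast
#     >>> tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     >>> ids = tok("Hello world!")["input_ids"]
#     >>> ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id
#     True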
from __future__ import annotations


def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary-search for the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a better candidate subsequence of length 1.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element, keeping subsequence tails minimal.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
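# Usage sketch (editor's example; the input list is illustrative, not from the
# source). `tail` keeps only the smallest possible tail value for each subsequence
# length, so each element costs at most one O(log n) binary search:
#
#     >>> longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60, 80])
#     6
#
# One longest increasing subsequence here is 10, 22, 33, 50, 60, 80.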
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    # Recurse into the subgraphs carried by control-flow nodes.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load an ONNX model, deduplicate identical initializer tensors, and save the
    optimized copy next to the original as 'optimized_<name>'."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue

            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
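# Usage sketch (editor's example; the file path is illustrative, not from the source):
#
#     optimized_path = remove_dup_initializers("model.onnx")
#     # -> writes "optimized_model.onnx" alongside the input and returns its path.
#
# Deduplicating initializers helps most for exported models whose tied weights
# (e.g. shared input/output embeddings) were serialized twice.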
from __future__ import annotations


def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case: every vertex has been placed
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating the vertices are not yet visited
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
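# Usage sketch (editor's example; the adjacency matrix is illustrative, not from the
# source). Vertices 0-1-2-4-3-0 form a Hamiltonian cycle in this graph:
#
#     >>> graph = [
#     ...     [0, 1, 0, 1, 0],
#     ...     [1, 0, 1, 1, 1],
#     ...     [0, 1, 0, 0, 1],
#     ...     [1, 1, 0, 0, 1],
#     ...     [0, 1, 1, 1, 0],
#     ... ]
#     >>> hamilton_cycle(graph)
#     [0, 1, 2, 4, 3, 0]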
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM-head weight key so the checkpoint loads into transformers' GPT-2."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
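# Usage sketch (editor's example; the script and directory names are illustrative,
# not from the source):
#
#     python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_checkpoints
#
# This expects small_ft.pkl / medium_ft.pkl / large_ft.pkl in that folder and writes
# a transformers-compatible pytorch_model.bin into ./DialoGPT-{small,medium,large}.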
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _A : def __init__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Tuple=14 , __magic_name__ : List[str]=7 , __magic_name__ : Any=True , __magic_name__ : List[Any]=True , __magic_name__ : int=False , __magic_name__ : int=True , __magic_name__ : Optional[int]=99 , __magic_name__ : List[str]=32 , __magic_name__ : Tuple=4 , __magic_name__ : Tuple=4 , __magic_name__ : List[str]=4 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : str=0.02 , ) -> Any: """simple docstring""" __snake_case : Dict = parent __snake_case : Union[str, Any] = batch_size __snake_case : Tuple = seq_length __snake_case : Tuple = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : Any = use_token_type_ids __snake_case : List[str] = use_labels __snake_case : List[str] = vocab_size __snake_case : int = hidden_size __snake_case : Any = rotary_dim __snake_case : List[Any] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : List[Any] = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : int = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Optional[int] = initializer_range __snake_case : Tuple = None __snake_case : Optional[Any] = vocab_size - 1 __snake_case : Any = vocab_size - 1 __snake_case : Union[str, Any] = vocab_size - 1 def lowercase__ ( self : List[Any] ) -> int: """simple docstring""" __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = None if self.use_input_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__magic_name__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : List[Any] = config_and_inputs __snake_case : List[str] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = 20 __snake_case : int = model_class_name(__magic_name__ 
) __snake_case : List[Any] = model.init_cache(input_ids.shape[0] , __magic_name__ ) __snake_case : Tuple = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __snake_case : List[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __snake_case : Tuple = model( input_ids[:, :-1] , attention_mask=__magic_name__ , past_key_values=__magic_name__ , position_ids=__magic_name__ , ) __snake_case : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) __snake_case : Tuple = model( input_ids[:, -1:] , attention_mask=__magic_name__ , past_key_values=outputs_cache.past_key_values , position_ids=__magic_name__ , ) __snake_case : Tuple = model(__magic_name__ ) __snake_case : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] ) -> int: """simple docstring""" __snake_case : List[Any] = 20 __snake_case : List[str] = model_class_name(__magic_name__ ) __snake_case : int = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __snake_case : Any = model.init_cache(input_ids.shape[0] , __magic_name__ ) __snake_case : List[str] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __snake_case : int = model( input_ids[:, :-1] , attention_mask=__magic_name__ , past_key_values=__magic_name__ , position_ids=__magic_name__ , ) __snake_case : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) __snake_case : Optional[Any] = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__magic_name__ , position_ids=__magic_name__ , ) __snake_case : str = model(__magic_name__ , attention_mask=__magic_name__ ) __snake_case : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: str = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () lowercase__: Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = FlaxGPTJModelTester(self ) def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: __snake_case , __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: __snake_case , __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @tooslow def lowercase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) __snake_case : Dict = 
tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=__magic_name__ , truncation=__magic_name__ ) __snake_case : Union[str, Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) __snake_case : str = False __snake_case : Optional[Any] = model.config.eos_token_id __snake_case : Optional[Any] = jax.jit(model.generate ) __snake_case : Any = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences __snake_case : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) __snake_case : int = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(__magic_name__ , __magic_name__ ) @is_pt_flax_cross_test def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __snake_case : Union[str, Any] = self._prepare_for_class(__magic_name__ , __magic_name__ ) __snake_case : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __snake_case : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning __snake_case : int = getattr(__magic_name__ , __magic_name__ ) __snake_case , __snake_case : Dict = pt_inputs["""input_ids"""].shape __snake_case : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__magic_name__ ): __snake_case : Tuple = 0 __snake_case : Union[str, Any] = 1 __snake_case : Any = 0 __snake_case : int = 1 __snake_case : Optional[Any] = pt_model_class(__magic_name__ ).eval() __snake_case : List[str] = model_class(__magic_name__ , dtype=jnp.floataa ) __snake_case : Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __magic_name__ ) __snake_case : Tuple = fx_state with torch.no_grad(): __snake_case : Tuple = pt_model(**__magic_name__ ).to_tuple() __snake_case : Dict = fx_model(**__magic_name__ ).to_tuple() self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(__magic_name__ , __magic_name__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__magic_name__ ) __snake_case : int = model_class.from_pretrained(__magic_name__ , from_pt=__magic_name__ ) __snake_case : Tuple = fx_model_loaded(**__magic_name__ ).to_tuple() self.assertEqual( len(__magic_name__ ) , len(__magic_name__ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(__magic_name__ , __magic_name__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowercase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __snake_case : Any = self._prepare_for_class(__magic_name__ , __magic_name__ ) __snake_case : Tuple = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load 
corresponding PyTorch class __snake_case : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning __snake_case : int = getattr(__magic_name__ , __magic_name__ ) __snake_case : Tuple = pt_model_class(__magic_name__ ).eval() __snake_case : Union[str, Any] = model_class(__magic_name__ , dtype=jnp.floataa ) __snake_case : Union[str, Any] = load_flax_weights_in_pytorch_model(__magic_name__ , fx_model.params ) __snake_case , __snake_case : Any = pt_inputs["""input_ids"""].shape __snake_case : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__magic_name__ ): __snake_case : Dict = 0 __snake_case : str = 1 __snake_case : Tuple = 0 __snake_case : Union[str, Any] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __snake_case : str = pt_model(**__magic_name__ ).to_tuple() __snake_case : List[Any] = fx_model(**__magic_name__ ).to_tuple() self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(__magic_name__ , __magic_name__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__magic_name__ ) __snake_case : Optional[int] = pt_model_class.from_pretrained(__magic_name__ , from_flax=__magic_name__ ) with torch.no_grad(): __snake_case : str = pt_model_loaded(**__magic_name__ ).to_tuple() self.assertEqual( len(__magic_name__ ) , len(__magic_name__ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(__magic_name__ , __magic_name__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" for model_class_name in self.all_model_classes: __snake_case : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) __snake_case : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ )
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            # Preserve the case of the original symbol.
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through unchanged and do not advance the key.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
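# Usage sketch (editor's example; the key and message are illustrative, not from the
# source). Each letter is shifted by the alphabet index of the matching key letter;
# decryption applies the opposite shift, so the round trip is the identity:
#
#     >>> encrypt_message("LION", "attack at dawn")
#     'lbhnns og oika'
#     >>> decrypt_message("LION", "lbhnns og oika")
#     'attack at dawn'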
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word's letters sorted, so all anagrams share one signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of my_word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
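# Usage sketch (editor's example; results depend on the contents of words.txt).
# Because all anagrams sort to the same string, lookup is a single dict access
# after the one-time index build:
#
#     >>> signature("listen")
#     'eilnst'
#     >>> signature("silent")
#     'eilnst'
#     >>> "silent" in anagram("listen")   # True if both words appear in words.txt
#     True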
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : Tuple = value elif weight_type == "bias": __snake_case : str = value else: __snake_case : List[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [] __snake_case : List[Any] = fairseq_model.state_dict() __snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Any = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __snake_case : Dict = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2] __snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase ) if "weight_g" in name: __snake_case : Dict = """weight_g""" elif "weight_v" in name: __snake_case : List[str] = """weight_v""" elif "weight" in name: __snake_case : str = """weight""" elif "bias" in name: __snake_case : int = """bias""" else: __snake_case : int = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = full_name.split("""conv_layers.""" )[-1] __snake_case : Optional[int] = name.split(""".""" ) __snake_case : Dict = int(items[0] ) __snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : List[str] = SEWConfig() if is_finetuned: __snake_case : List[Any] = model.wav_encoder.wav_model.cfg else: __snake_case : Optional[Any] = model.cfg __snake_case : Tuple = fs_config.conv_bias __snake_case : List[Any] = eval(fs_config.conv_feature_layers ) __snake_case : List[Any] = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : Tuple = [x[2] for x in conv_layers] __snake_case : List[str] = """gelu""" __snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" __snake_case : Optional[int] = 0.0 __snake_case : Optional[Any] = fs_config.activation_fn.name __snake_case : Dict = fs_config.encoder_embed_dim __snake_case : Dict = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Tuple = 1E-5 __snake_case : Dict = fs_config.encoder_layerdrop __snake_case : Any = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Tuple = fs_config.conv_pos __snake_case : Optional[int] = len(_lowerCamelCase ) __snake_case : int = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : Union[str, Any] = model.cfg __snake_case : Tuple = fs_config.final_dropout __snake_case : Tuple = fs_config.layerdrop __snake_case : Any = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : List[Any] = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : str = fs_config.mask_channel_length __snake_case : Any = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : str = fs_config.mask_prob __snake_case : str = """Wav2Vec2FeatureExtractor""" __snake_case : Dict = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase ) else: __snake_case : int = convert_config(model[0] , _lowerCamelCase ) __snake_case : Dict = model[0].eval() __snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __snake_case : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , 
do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if is_finetuned: if dict_path: __snake_case : str = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Optional[Any] = target_dict.bos_index __snake_case : Tuple = target_dict.pad_index __snake_case : List[str] = target_dict.bos_index __snake_case : Optional[Any] = target_dict.eos_index __snake_case : List[str] = len(target_dict.symbols ) __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" ) if not os.path.isdir(_lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case : List[str] = SEWForCTC(_lowerCamelCase ) else: __snake_case : List[str] = SEWModel(_lowerCamelCase ) feature_extractor.save_pretrained(_lowerCamelCase ) recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __UpperCamelCase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
from ..utils import DummyObject, requires_backends


# The obfuscated source gave both classes the same name; the names below are
# reconstructed from the matching transformers module (utils/dummy_speech_objects.py).
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
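# Usage sketch (editor's note): these dummies let `from transformers import ...`
# succeed without the optional speech dependencies installed; any attempt to
# instantiate one raises an ImportError naming the missing backend, roughly:
#
#     >>> ASTFeatureExtractor()
#     ImportError: ... requires the speech backend ...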
def is_pentagonal(n: int) -> bool:
    """True if n is pentagonal, i.e. (1 + sqrt(1 + 24n)) / 6 is a whole number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find pentagonal numbers whose sum and difference are both
    pentagonal, and return the (minimal) difference between them."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
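# Usage sketch (editor's example). Pentagonal numbers follow P(n) = n(3n - 1) / 2,
# so the sequence starts 1, 5, 12, 22, 35, ...; the inverse test above checks
# whether (1 + sqrt(1 + 24n)) / 6 lands on a whole number:
#
#     >>> is_pentagonal(22)
#     True
#     >>> is_pentagonal(23)
#     False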
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version __UpperCamelCase = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") __UpperCamelCase = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeqaSeqLM, "translation": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization __UpperCamelCase = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } __UpperCamelCase = sorted(arg_to_scheduler.keys()) __UpperCamelCase = "{" + ", ".join(arg_to_scheduler_choices) + "}" class _A ( pl.LightningModule ): def __init__( self : Optional[Any] , __magic_name__ : argparse.Namespace , __magic_name__ : str=None , __magic_name__ : Any="base" , __magic_name__ : int=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Tuple=None , **__magic_name__ : List[Any] , ) -> Any: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__magic_name__ ) __snake_case : int = 0 __snake_case : Optional[int] = Path(self.hparams.output_dir ) __snake_case : Union[str, Any] = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __snake_case : Optional[int] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__magic_name__ , **__magic_name__ , ) else: __snake_case : PretrainedConfig = config __snake_case : Optional[Any] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(self.hparams , __magic_name__ , __magic_name__ ): assert hasattr(self.config , __magic_name__ ), f'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , __magic_name__ , getattr(self.hparams , __magic_name__ ) ) if tokenizer is None: __snake_case : Dict = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__magic_name__ , ) else: __snake_case : PreTrainedTokenizer = tokenizer __snake_case : Optional[Any] = MODEL_MODES[mode] if model is None: 
__snake_case : Dict = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__magic_name__ , ) else: __snake_case : Optional[Any] = model def lowercase__ ( self : str , *__magic_name__ : Union[str, Any] , **__magic_name__ : List[Any] ) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = self.model_type.from_pretrained(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler] __snake_case : str = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __snake_case : Tuple = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1} return scheduler def lowercase__ ( self : Any ) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.model __snake_case : List[Any] = ["""bias""", """LayerNorm.weight"""] __snake_case : List[str] = [ { """params""": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters """weight_decay""": self.hparams.weight_decay, }, { """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] if self.hparams.adafactor: __snake_case : Dict = Adafactor( __magic_name__ , lr=self.hparams.learning_rate , scale_parameter=__magic_name__ , relative_step=__magic_name__ ) else: __snake_case : Any = AdamW( __magic_name__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __snake_case : Tuple = optimizer __snake_case : Dict = self.get_lr_scheduler() return [optimizer], [scheduler] def lowercase__ ( self : int , __magic_name__ : Dict , __magic_name__ : List[Any] ) -> Any: """simple docstring""" return self.validation_step(__magic_name__ , __magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : Dict ) -> Optional[int]: """simple docstring""" return self.validation_end(__magic_name__ ) def lowercase__ ( self : List[Any] ) -> int: """simple docstring""" __snake_case : List[str] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __snake_case : Union[str, Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def lowercase__ ( self : List[str] , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" if stage == "test": __snake_case : Optional[int] = len(self.test_dataloader().dataset ) else: __snake_case : int = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__magic_name__ ) __snake_case : Optional[Any] = len(self.train_dataloader().dataset ) def lowercase__ ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : bool = False ) -> Any: """simple docstring""" raise NotImplementedError("""You must implement this for your task""" ) def lowercase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" return self.train_loader def lowercase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__magic_name__ ) def lowercase__ ( self : Dict ) -> Any: """simple docstring""" return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : Dict ) -> Dict: """simple 
docstring""" return os.path.join( self.hparams.data_dir , """cached_{}_{}_{}""".format( __magic_name__ , list(filter(__magic_name__ , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def lowercase__ ( self : Optional[Any] , __magic_name__ : Dict[str, Any] ) -> None: """simple docstring""" __snake_case : Optional[int] = self.output_dir.joinpath("""best_tfmr""" ) __snake_case : Optional[Any] = self.step_count self.model.save_pretrained(__magic_name__ ) self.tokenizer.save_pretrained(__magic_name__ ) @staticmethod def lowercase__ ( __magic_name__ : List[str] , __magic_name__ : int ) -> Optional[Any]: """simple docstring""" parser.add_argument( """--model_name_or_path""" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--config_name""" , default="""""" , type=__magic_name__ , help="""Pretrained config name or path if not the same as model_name""" ) parser.add_argument( """--tokenizer_name""" , default=__magic_name__ , type=__magic_name__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument( """--cache_dir""" , default=str(Path(__magic_name__ ).parent / """test_run""" / """cache""" ) , type=__magic_name__ , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , ) parser.add_argument( """--encoder_layerdrop""" , type=__magic_name__ , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--decoder_layerdrop""" , type=__magic_name__ , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--dropout""" , type=__magic_name__ , help="""Dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--attention_dropout""" , type=__magic_name__ , help="""Attention dropout probability (Optional). Goes into model.config""" , ) parser.add_argument("""--learning_rate""" , default=5E-5 , type=__magic_name__ , help="""The initial learning rate for Adam.""" ) parser.add_argument( """--lr_scheduler""" , default="""linear""" , choices=__magic_name__ , metavar=__magic_name__ , type=__magic_name__ , help="""Learning rate scheduler""" , ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__magic_name__ , help="""Weight decay if we apply some.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__magic_name__ , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--warmup_steps""" , default=0 , type=__magic_name__ , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--num_workers""" , default=4 , type=__magic_name__ , help="""kwarg passed to DataLoader""" ) parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__magic_name__ ) parser.add_argument("""--train_batch_size""" , default=32 , type=__magic_name__ ) parser.add_argument("""--eval_batch_size""" , default=32 , type=__magic_name__ ) parser.add_argument("""--adafactor""" , action="""store_true""" ) class _A ( pl.Callback ): def lowercase__ ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Any ) -> int: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. 
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class _A ( pl.Callback ): def lowercase__ ( self : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] ) -> str: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__magic_name__ ) class _A ( pl.Callback ): def lowercase__ ( self : str , __magic_name__ : Any , __magic_name__ : Optional[int] ) -> Optional[Any]: """simple docstring""" __snake_case : str = trainer.lr_schedulers[0]["""scheduler"""] __snake_case : List[str] = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule ) -> int: """simple docstring""" rank_zero_info("""***** Validation results *****""" ) __snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(__magic_name__ ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(__magic_name__ , str(metrics[key] ) ) ) def lowercase__ ( self : str , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule ) -> Optional[int]: """simple docstring""" rank_zero_info("""***** Test results *****""" ) __snake_case : Union[str, Any] = trainer.callback_metrics # Log and save results to file __snake_case : Optional[int] = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" ) with open(__magic_name__ , """w""" ) as writer: for key in sorted(__magic_name__ ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(__magic_name__ , str(metrics[key] ) ) ) writer.write("""{} = {}\n""".format(__magic_name__ , str(metrics[key] ) ) ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> None: """simple docstring""" parser.add_argument( """--output_dir""" , default=str(Path(_lowerCamelCase ).parent / """test_run""" / """model_checkpoints""" ) , type=_lowerCamelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=_lowerCamelCase , default="""O2""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=_lowerCamelCase ) parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=_lowerCamelCase , help="""Max gradient norm""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" ) parser.add_argument( """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=_lowerCamelCase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--seed""" , type=_lowerCamelCase , default=42 , help="""random seed for initialization""" ) parser.add_argument( """--data_dir""" , default=str(Path(_lowerCamelCase ).parent / """test_run""" / """dummy-train-data""" ) , type=_lowerCamelCase , help="""The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.""" , )


def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """simple docstring"""
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        # LoggingCallback and InitCallback are the pl.Callback subclasses defined
        # above (their class names are obfuscated to `_A` in this dump).
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
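# --- Added usage sketch (not part of the original file) ---
# A hedged illustration of how the pieces above are meant to compose. The names
# add_generic_args (the generic-args helper obfuscated as `_a` above),
# BaseTransformer (the LightningModule subclass obfuscated as `_A`), and
# SomeTaskModule are assumptions for this sketch, not definitions in this file:
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     parser = BaseTransformer.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = SomeTaskModule(args)   # hypothetical BaseTransformer subclass
#     trainer = generic_train(model, args)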
13
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : List[Any] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __snake_case : int = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __snake_case : Optional[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above __snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __snake_case : Dict = output[output != -float("""inf""" )] __snake_case : Optional[Any] = tf.cast( tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @require_tf class _A ( unittest.TestCase , __lowercase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): lowercase__: Tuple = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" __snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Optional[int] = 2 __snake_case : str = 2 class _A ( tf.Module ): def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Dict = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict: """simple docstring""" __snake_case : Tuple = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : int = [[2, 0], [1_02, 1_03]] __snake_case : Tuple = [[1, 0], [1, 1]] __snake_case : Union[str, Any] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for batch_size in range(1 , len(__magic_name__ ) + 1 ): __snake_case : Union[str, Any] = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } __snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""] __snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case 
: Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Dict = 1 __snake_case : int = 2 class _A ( tf.Module ): def __init__( self : Tuple , __magic_name__ : List[str] ) -> int: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : Union[str, Any] = [[2], [1_02, 1_03]] __snake_case : Tuple = [[1], [1, 1]] __snake_case : List[str] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for input_row in range(len(__magic_name__ ) ): __snake_case : Tuple = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } __snake_case : str = serving_func(**__magic_name__ )["""sequences"""] __snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow @require_tensorflow_text def lowercase__ ( self : Dict ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ ) class _A ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ) -> int: """simple docstring""" super().__init__() __snake_case : Any = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() ) __snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ ) __snake_case , __snake_case : List[Any] = text.pad_model_inputs( __magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ ) return self.tokenizer.detokenize(__magic_name__ ) __snake_case : int = CompleteSentenceTransformer() __snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" ) __snake_case : Tuple = complete_model(__magic_name__ ) __snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ ) keras_model.save(__magic_name__ ) def lowercase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __snake_case : Dict = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } __snake_case : str = 14 __snake_case : str = 
AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : int = """Hello, my dog is cute and""" __snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" ) __snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : List[Any] = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) __snake_case : Dict = [6_38, 1_98] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : str = """Hugging Face is a technology company based in New York and Paris.""" __snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids __snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : int = bart_model.generate(__magic_name__ ).numpy() class _A ( __lowercase ): def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) ) class _A ( bart_model.model.encoder.__class__ ): def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) __snake_case : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __snake_case : Dict = bart_model.generate(__magic_name__ ).numpy() with self.assertRaises(__magic_name__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(__magic_name__ , foo="""bar""" )
13
1
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = XGLMTokenizer lowercase__: Dict = XGLMTokenizerFast lowercase__: List[str] = True lowercase__: Optional[Any] = True def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __snake_case : List[str] = XGLMTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __snake_case : str = """<pad>""" __snake_case : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowercase__ ( self : Any ) -> Tuple: """simple docstring""" __snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 10_08 ) def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_08 ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : List[str] = XGLMTokenizer(__magic_name__ , keep_accents=__magic_name__ ) __snake_case : Dict = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __snake_case : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __snake_case : Tuple = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def lowercase__ ( self : str ) -> Any: """simple docstring""" return 
XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__magic_name__ , f.name ) __snake_case : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=__magic_name__ ) __snake_case : str = pickle.dumps(__magic_name__ ) pickle.loads(__magic_name__ ) def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" if not self.test_rust_tokenizer: return __snake_case : Optional[int] = self.get_tokenizer() __snake_case : Optional[Any] = self.get_rust_tokenizer() __snake_case : Dict = """I was born in 92000, and this is falsé.""" __snake_case : Any = tokenizer.tokenize(__magic_name__ ) __snake_case : Optional[Any] = rust_tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : str = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) __snake_case : Tuple = self.get_rust_tokenizer() __snake_case : Optional[int] = tokenizer.encode(__magic_name__ ) __snake_case : Optional[int] = rust_tokenizer.encode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> List[str]: """simple docstring""" __snake_case : str = """Hello World!""" __snake_case : Optional[int] = [2, 3_12_27, 44_47, 35] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : Optional[int] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off __snake_case : Optional[int] = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35] # fmt: on self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : str = { """input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""facebook/xglm-564M""" , padding=__magic_name__ , )
13
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, backtracking on collisions."""
    row = len(possible_board)

    # If row is equal to the size of the board, there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value; if it does, there is a vertical collision.
        # Then we apply the two diagonal formulas:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # and verify that their results do not already exist in
        # diagonal_right_collisions and diagonal_left_collisions respectively.
        #
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise we recurse with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
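# --- Added note (not part of the original file) ---
# Why the two diagonal formulas work: squares (r1, c1) and (r2, c2) share a
# 45-degree ("\") diagonal exactly when r1 - c1 == r2 - c2, and a 135-degree
# ("/") diagonal exactly when r1 + c1 == r2 + c2, so each diagonal can be
# tracked by a single integer. A tiny check of that invariant:
assert 0 - 1 == 2 - 3    # (0, 1) and (2, 3) share a "\" diagonal
assert 0 + 1 != 2 + 3    # ... but not a "/" diagonal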
13
1
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the vertices reachable from `vert` in post-order (finish time)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: order vertices by finish time, then explore the
    reversed graph in decreasing finish order."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
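# --- Added usage sketch (not part of the original file) ---
# The two module-level dictionaries look like test fixtures, so, assuming the
# restored names above, a minimal run would be:
#
#     strongly_connected_components(test_graph_1)
#
# In test_graph_1 the vertices 0 -> 2 -> 1 -> 0 form a cycle while 3 and 4 have
# no path back, so the result is [[0, 1, 2], [3], [4]] (component order may
# vary with traversal order).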
13
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCamelCase = logging.getLogger(__name__) class _A ( __lowercase ): def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" super().__init__( __magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , ) __snake_case : List[str] = None def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]: """simple docstring""" logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __snake_case : List[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __snake_case : List[str] = str(distributed_port + 1 ) __snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowercase__ ( self : int ) -> int: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ ) dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group ) return target_tensor def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : int = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ ) return ifname def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): __snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ ) # distributed training __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group ) # gather logic __snake_case : Tuple = None if self._is_main(): __snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )] dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group ) # scatter logic __snake_case : Optional[int] = question_hidden_states.shape[0] __snake_case : Optional[Any] = [] __snake_case : Any = [] if self._is_main(): assert len(__magic_name__ ) == world_size __snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ ) __snake_case , __snake_case : Tuple = 
torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa ) __snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
13
1
'''simple docstring'''


def is_even(number: int) -> bool:
    """Return True if `number` is even, by testing its lowest bit."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
13
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def __str__(self):
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
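# --- Added usage sketch (not part of the original file) ---
# String operands are coerced through _validate_operand, so, with the restored
# names above, plain strings compare directly against Version instances:
v = Version("1.2.3")
assert v == "1.2.3"      # coerced to Version before comparing
assert v < "1.10.0"      # per-component numeric comparison, not lexicographic
assert str(v) == "1.2.3"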
13
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _A ( unittest.TestCase ): def lowercase__ ( self : Dict ) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = """laion/clap-htsat-unfused""" __snake_case : Dict = tempfile.mkdtemp() def lowercase__ ( self : Union[str, Any] , **__magic_name__ : List[str] ) -> str: """simple docstring""" return RobertaTokenizer.from_pretrained(self.checkpoint , **__magic_name__ ) def lowercase__ ( self : List[str] , **__magic_name__ : List[Any] ) -> Optional[int]: """simple docstring""" return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" __snake_case : List[str] = self.get_tokenizer() __snake_case : Tuple = self.get_feature_extractor() __snake_case : Any = ClapProcessor(tokenizer=__magic_name__ , feature_extractor=__magic_name__ ) processor.save_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __snake_case : List[Any] = self.get_feature_extractor(do_normalize=__magic_name__ , padding_value=1.0 ) __snake_case : Any = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.get_feature_extractor() __snake_case : int = self.get_tokenizer() __snake_case : List[Any] = ClapProcessor(tokenizer=__magic_name__ , feature_extractor=__magic_name__ ) __snake_case : Tuple = floats_list((3, 10_00) ) __snake_case : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors="""np""" ) __snake_case : str = processor(audios=__magic_name__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : str = self.get_feature_extractor() __snake_case : Any = self.get_tokenizer() __snake_case : List[Any] = ClapProcessor(tokenizer=__magic_name__ , 
feature_extractor=__magic_name__ ) __snake_case : int = """This is a test string""" __snake_case : List[Any] = processor(text=__magic_name__ ) __snake_case : List[Any] = tokenizer(__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_feature_extractor() __snake_case : Optional[Any] = self.get_tokenizer() __snake_case : List[Any] = ClapProcessor(tokenizer=__magic_name__ , feature_extractor=__magic_name__ ) __snake_case : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : Optional[int] = processor.batch_decode(__magic_name__ ) __snake_case : int = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Dict = self.get_feature_extractor() __snake_case : List[Any] = self.get_tokenizer() __snake_case : Dict = ClapProcessor(tokenizer=__magic_name__ , feature_extractor=__magic_name__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
13
'''simple docstring'''


def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
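# --- Added worked example (not part of the original file) ---
# "1111" is left-padded to "001111", split into ["001", "111"], and each 3-bit
# group becomes one octal digit:
assert bin_to_octal("1111") == "17"      # 0b1111 == 0o17 == 15
assert bin_to_octal("101010") == "52"    # 0b101010 == 0o52 == 42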
13
1
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _A ( nn.Module ): lowercase__: int lowercase__: int lowercase__: float = 0.0 lowercase__: int = 1 lowercase__: int = 1 lowercase__: bool = True lowercase__: bool = False lowercase__: bool = False lowercase__: bool = False lowercase__: jnp.dtype = jnp.floataa def lowercase__ ( self : str ) -> Union[str, Any]: """simple docstring""" __snake_case : List[Any] = [] __snake_case : List[Any] = [] for i in range(self.num_layers ): __snake_case : Dict = self.in_channels if i == 0 else self.out_channels __snake_case : Optional[int] = FlaxResnetBlockaD( in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) __snake_case : str = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__magic_name__ ) __snake_case : Optional[int] = resnets __snake_case : Union[str, Any] = attentions if self.add_downsample: __snake_case : List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=True ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): __snake_case : Any = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) __snake_case : Tuple = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) output_states += (hidden_states,) if self.add_downsample: __snake_case : Optional[int] = self.downsamplers_a(__magic_name__ ) output_states += (hidden_states,) return hidden_states, output_states class _A ( nn.Module ): lowercase__: int lowercase__: int lowercase__: float = 0.0 lowercase__: int = 1 lowercase__: bool = True lowercase__: jnp.dtype = jnp.floataa def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : str = [] for i in range(self.num_layers ): __snake_case : Optional[int] = self.in_channels if i == 0 else self.out_channels __snake_case : int = FlaxResnetBlockaD( in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) __snake_case : int = resnets if self.add_downsample: __snake_case : str = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any]=True ) -> Any: """simple docstring""" __snake_case : Any = () for resnet in self.resnets: __snake_case : Optional[int] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) output_states += (hidden_states,) if self.add_downsample: __snake_case : Optional[int] = self.downsamplers_a(__magic_name__ ) output_states += (hidden_states,) return hidden_states, output_states class _A ( nn.Module ): lowercase__: int lowercase__: int lowercase__: int lowercase__: float = 0.0 lowercase__: int = 1 lowercase__: int = 1 lowercase__: bool = True lowercase__: bool = False 
lowercase__: bool = False lowercase__: bool = False lowercase__: jnp.dtype = jnp.floataa def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Optional[int] = [] __snake_case : str = [] for i in range(self.num_layers ): __snake_case : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels __snake_case : Union[str, Any] = self.prev_output_channel if i == 0 else self.out_channels __snake_case : Dict = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) __snake_case : List[Any] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__magic_name__ ) __snake_case : Optional[int] = resnets __snake_case : Any = attentions if self.add_upsample: __snake_case : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : str , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=True ) -> Dict: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __snake_case : Any = res_hidden_states_tuple[-1] __snake_case : int = res_hidden_states_tuple[:-1] __snake_case : Any = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __snake_case : Any = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) __snake_case : Tuple = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) if self.add_upsample: __snake_case : Dict = self.upsamplers_a(__magic_name__ ) return hidden_states class _A ( nn.Module ): lowercase__: int lowercase__: int lowercase__: int lowercase__: float = 0.0 lowercase__: int = 1 lowercase__: bool = True lowercase__: jnp.dtype = jnp.floataa def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = [] for i in range(self.num_layers ): __snake_case : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels __snake_case : int = self.prev_output_channel if i == 0 else self.out_channels __snake_case : Union[str, Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) __snake_case : Optional[Any] = resnets if self.add_upsample: __snake_case : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Optional[int]=True ) -> List[str]: """simple docstring""" for resnet in self.resnets: # pop res hidden states __snake_case : Tuple = res_hidden_states_tuple[-1] __snake_case : Any = res_hidden_states_tuple[:-1] __snake_case : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __snake_case : Any = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) if self.add_upsample: __snake_case : int = self.upsamplers_a(__magic_name__ ) return hidden_states class _A ( nn.Module ): lowercase__: int lowercase__: float = 0.0 lowercase__: int = 1 
lowercase__: int = 1 lowercase__: bool = False lowercase__: bool = False lowercase__: jnp.dtype = jnp.floataa def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Tuple = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __snake_case : List[Any] = [] for _ in range(self.num_layers ): __snake_case : Any = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__magic_name__ ) __snake_case : Tuple = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) __snake_case : Optional[int] = resnets __snake_case : Optional[Any] = attentions def __call__( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : Optional[int]=True ) -> int: """simple docstring""" __snake_case : List[str] = self.resnets[0](__magic_name__ , __magic_name__ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __snake_case : List[Any] = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) __snake_case : str = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) return hidden_states
13
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
13
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __UpperCamelCase = [ "small", "small-base", "medium", "medium-base", "intermediate", "intermediate-base", "large", "large-base", "xlarge", "xlarge-base", ] __UpperCamelCase = { "vocab_file": { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt", "funnel-transformer/medium-base": ( "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt" ), "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt", "funnel-transformer/xlarge-base": ( "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json", "funnel-transformer/small-base": ( "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json" ), "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json", "funnel-transformer/medium-base": ( "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json" ), "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json", "funnel-transformer/large-base": ( "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json" ), "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json", "funnel-transformer/xlarge-base": ( "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json" ), }, } __UpperCamelCase = {f"""funnel-transformer/{name}""": 512 for name in _model_names} __UpperCamelCase = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names} class _A ( __lowercase ): lowercase__: Optional[Any] = VOCAB_FILES_NAMES lowercase__: int = PRETRAINED_VOCAB_FILES_MAP lowercase__: Dict = PRETRAINED_INIT_CONFIGURATION lowercase__: Dict = FunnelTokenizer lowercase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: int = 2 def __init__( self : Optional[Any] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : str=True , __magic_name__ : Dict="<unk>" , 
__magic_name__ : str="<sep>" , __magic_name__ : int="<pad>" , __magic_name__ : Optional[Any]="<cls>" , __magic_name__ : Dict="<mask>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="</s>" , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[int]=None , __magic_name__ : str="##" , **__magic_name__ : Union[str, Any] , ) -> Dict: """simple docstring""" super().__init__( __magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , clean_text=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , wordpieces_prefix=__magic_name__ , **__magic_name__ , ) __snake_case : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __magic_name__ ) != do_lower_case or normalizer_state.get("""strip_accents""" , __magic_name__ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __magic_name__ ) != tokenize_chinese_chars ): __snake_case : Any = getattr(__magic_name__ , normalizer_state.pop("""type""" ) ) __snake_case : Tuple = do_lower_case __snake_case : List[Any] = strip_accents __snake_case : Dict = tokenize_chinese_chars __snake_case : Optional[int] = normalizer_class(**__magic_name__ ) __snake_case : Union[str, Any] = do_lower_case def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : Optional[int]=None ) -> Any: """simple docstring""" __snake_case : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : Any , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Tuple = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __snake_case : Union[str, Any] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) return tuple(__magic_name__ )
13
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __snake_case : Tuple = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""] __snake_case : Any = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. __snake_case : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
13
1
'''simple docstring''' def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : Tuple = """""" for i in table: res += inp[i - 1] return res def _a ( _lowerCamelCase ) -> Optional[Any]: """simple docstring""" return data[1:] + data[0] def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" __snake_case : Tuple = """""" for i in range(len(_lowerCamelCase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Union[str, Any] = int("""0b""" + data[0] + data[-1] , 2 ) __snake_case : Any = int("""0b""" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: """simple docstring""" __snake_case : int = message[:4] __snake_case : Dict = message[4:] __snake_case : Dict = apply_table(_lowerCamelCase , _lowerCamelCase ) __snake_case : Union[str, Any] = xor(_lowerCamelCase , _lowerCamelCase ) __snake_case : Optional[int] = apply_sbox(_lowerCamelCase , temp[:4] ) # noqa: E741 __snake_case : List[str] = apply_sbox(_lowerCamelCase , temp[4:] ) __snake_case : Tuple = """0""" * (2 - len(_lowerCamelCase )) + l # noqa: E741 __snake_case : List[Any] = """0""" * (2 - len(_lowerCamelCase )) + r __snake_case : Dict = apply_table(l + r , _lowerCamelCase ) __snake_case : List[Any] = xor(_lowerCamelCase , _lowerCamelCase ) return temp + right if __name__ == "__main__": __UpperCamelCase = input("Enter 10 bit key: ") __UpperCamelCase = input("Enter 8 bit message: ") __UpperCamelCase = [6, 3, 7, 4, 8, 5, 10, 9] __UpperCamelCase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] __UpperCamelCase = [2, 4, 3, 1] __UpperCamelCase = [2, 6, 3, 1, 4, 8, 5, 7] __UpperCamelCase = [4, 1, 3, 5, 7, 2, 8, 6] __UpperCamelCase = [4, 1, 2, 3, 2, 3, 4, 1] __UpperCamelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __UpperCamelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __UpperCamelCase = apply_table(key, paa_table) __UpperCamelCase = temp[:5] __UpperCamelCase = temp[5:] __UpperCamelCase = left_shift(left) __UpperCamelCase = left_shift(right) __UpperCamelCase = apply_table(left + right, pa_table) __UpperCamelCase = left_shift(left) __UpperCamelCase = left_shift(right) __UpperCamelCase = left_shift(left) __UpperCamelCase = left_shift(right) __UpperCamelCase = apply_table(left + right, pa_table) # encryption __UpperCamelCase = apply_table(message, IP) __UpperCamelCase = function(expansion, sa, sa, keya, temp) __UpperCamelCase = temp[4:] + temp[:4] __UpperCamelCase = function(expansion, sa, sa, keya, temp) __UpperCamelCase = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __UpperCamelCase = apply_table(CT, IP) __UpperCamelCase = function(expansion, sa, sa, keya, temp) __UpperCamelCase = temp[4:] + temp[:4] __UpperCamelCase = function(expansion, sa, sa, keya, temp) __UpperCamelCase = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
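Two helpers above, apply_table and xor, carry most of the bit-fiddling in this simplified-DES sample; a deobfuscated, self-contained sketch follows (the mangled version reuses _lowerCamelCase for both parameters, so the readable names here are assumptions). Tables are 1-indexed, hence the inp[i - 1]:

def apply_table(inp: str, table: list) -> str:
    """Permute/select bits of `inp` according to a 1-indexed table."""
    return "".join(inp[i - 1] for i in table)

def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings."""
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

# e.g. with the sample's 4-bit permutation P4 = [2, 4, 3, 1]:
assert apply_table("1010", [2, 4, 3, 1]) == "0011"
assert xor("1010", "0110") == "1100"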
13
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
13
1
'''simple docstring''' from __future__ import annotations def _a ( _lowerCamelCase , _lowerCamelCase ) -> list[str]: """simple docstring""" if partitions <= 0: raise ValueError("""partitions must be a positive number!""" ) if partitions > number_of_bytes: raise ValueError("""partitions can not > number_of_bytes!""" ) __snake_case : int = number_of_bytes // partitions __snake_case : int = [] for i in range(_lowerCamelCase ): __snake_case : str = i * bytes_per_partition + 1 __snake_case : str = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(F'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
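A readable restatement of the allocator above with quick checks; the name allocation_num is an assumption (the sample mangles it to _a). The interesting detail is the last range absorbing the division remainder:

def allocation_num(number_of_bytes: int, partitions: int) -> list:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    return [
        f"{i * bytes_per_partition + 1}-"
        f"{number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition}"
        for i in range(partitions)
    ]

assert allocation_num(16, 4) == ["1-4", "5-8", "9-12", "13-16"]
assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]   # remainder goes to the tail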
13
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
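A quick arithmetic check of the sequence-length bookkeeping in the TimeSformer tester above, using its default sizes: each frame contributes an (image_size // patch_size)**2 patch grid, and a single CLS token is appended over the whole clip.

image_size, patch_size, num_frames = 10, 2, 2             # the tester's defaults
num_patches_per_frame = (image_size // patch_size) ** 2   # 5 * 5 = 25
seq_length = num_frames * num_patches_per_frame + 1       # 2 * 25 + 1 = 51
assert (num_patches_per_frame, seq_length) == (25, 51)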
13
1
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def _a ( ) -> None: """simple docstring""" print("""Making key files...""" ) make_key_files("""rsa""" , 1024 ) print("""Key files generation successful.""" ) def _a ( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]: """simple docstring""" print("""Generating prime p...""" ) __snake_case : Dict = rabinMiller.generate_large_prime(_lowerCamelCase ) print("""Generating prime q...""" ) __snake_case : Dict = rabinMiller.generate_large_prime(_lowerCamelCase ) __snake_case : str = p * q print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" ) while True: __snake_case : str = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1: break print("""Calculating d that is mod inverse of e...""" ) __snake_case : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) ) __snake_case : Tuple = (n, e) __snake_case : Optional[Any] = (n, d) return (public_key, private_key) def _a ( _lowerCamelCase , _lowerCamelCase ) -> None: """simple docstring""" if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ): print("""\nWARNING:""" ) print( F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n''' """Use a different name or delete these files and re-run this program.""" ) sys.exit() __snake_case , __snake_case : Union[str, Any] = generate_key(_lowerCamelCase ) print(F'''\nWriting public key to file {name}_pubkey.txt...''' ) with open(F'''{name}_pubkey.txt''' , """w""" ) as out_file: out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' ) print(F'''Writing private key to file {name}_privkey.txt...''' ) with open(F'''{name}_privkey.txt''' , """w""" ) as out_file: out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' ) if __name__ == "__main__": main()
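A toy-sized check of the key relation that generate_key above establishes, with fixed small primes instead of rabinMiller output (illustration only; the sample generates 1024-bit keys):

from math import gcd

p, q = 61, 53
n = p * q                   # modulus shared by both keys
phi = (p - 1) * (q - 1)     # 3120
e = 17
assert gcd(e, phi) == 1     # e relatively prime to (p - 1) * (q - 1)
d = pow(e, -1, phi)         # the mod inverse that find_mod_inverse computes
assert (e * d) % phi == 1

public_key, private_key = (n, e), (n, d)
message = 65
cipher = pow(message, e, n)
assert pow(cipher, d, n) == message   # decryption inverts encryption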
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["ConditionalDetrFeatureExtractor"] __UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : Dict = checkpoint __snake_case : Optional[Any] = {} __snake_case : Union[str, Any] = vae_state_dict["""encoder.conv_in.weight"""] __snake_case : str = vae_state_dict["""encoder.conv_in.bias"""] __snake_case : List[Any] = vae_state_dict["""encoder.conv_out.weight"""] __snake_case : Dict = vae_state_dict["""encoder.conv_out.bias"""] __snake_case : List[str] = vae_state_dict["""encoder.norm_out.weight"""] __snake_case : Tuple = vae_state_dict["""encoder.norm_out.bias"""] __snake_case : Dict = vae_state_dict["""decoder.conv_in.weight"""] __snake_case : Union[str, Any] = vae_state_dict["""decoder.conv_in.bias"""] __snake_case : List[str] = vae_state_dict["""decoder.conv_out.weight"""] __snake_case : List[Any] = vae_state_dict["""decoder.conv_out.bias"""] __snake_case : Optional[Any] = vae_state_dict["""decoder.norm_out.weight"""] __snake_case : Optional[int] = vae_state_dict["""decoder.norm_out.bias"""] __snake_case : Tuple = vae_state_dict["""quant_conv.weight"""] __snake_case : List[Any] = vae_state_dict["""quant_conv.bias"""] __snake_case : Any = vae_state_dict["""post_quant_conv.weight"""] __snake_case : Union[str, Any] = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only __snake_case : List[Any] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) __snake_case : int = { layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_lowerCamelCase ) } # Retrieves the keys for the decoder up blocks only __snake_case : List[Any] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) __snake_case : str = { layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(_lowerCamelCase ) } for i in range(_lowerCamelCase ): __snake_case : List[Any] = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key] if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: __snake_case : List[Any] = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.weight''' ) __snake_case : Any = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.bias''' ) __snake_case : Optional[Any] = renew_vae_resnet_paths(_lowerCamelCase ) __snake_case : Tuple = {"""old""": F'''down.{i}.block''', """new""": F'''down_blocks.{i}.resnets'''} assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase ) __snake_case : str = [key for key in vae_state_dict if """encoder.mid.block""" in key] __snake_case : List[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): __snake_case : Dict = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key] __snake_case : Tuple = renew_vae_resnet_paths(_lowerCamelCase ) __snake_case : List[Any] = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase ) 
__snake_case : int = [key for key in vae_state_dict if """encoder.mid.attn""" in key] __snake_case : int = renew_vae_attention_paths(_lowerCamelCase ) __snake_case : Optional[int] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase ) conv_attn_to_linear(_lowerCamelCase ) for i in range(_lowerCamelCase ): __snake_case : Optional[Any] = num_up_blocks - 1 - i __snake_case : Optional[Any] = [ key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key ] if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: __snake_case : Optional[int] = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.weight''' ] __snake_case : List[Any] = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.bias''' ] __snake_case : Tuple = renew_vae_resnet_paths(_lowerCamelCase ) __snake_case : List[Any] = {"""old""": F'''up.{block_id}.block''', """new""": F'''up_blocks.{i}.resnets'''} assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase ) __snake_case : Tuple = [key for key in vae_state_dict if """decoder.mid.block""" in key] __snake_case : Optional[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): __snake_case : Union[str, Any] = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key] __snake_case : Dict = renew_vae_resnet_paths(_lowerCamelCase ) __snake_case : str = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase ) __snake_case : List[Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key] __snake_case : List[str] = renew_vae_attention_paths(_lowerCamelCase ) __snake_case : Tuple = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase ) conv_attn_to_linear(_lowerCamelCase ) return new_checkpoint def _a ( _lowerCamelCase , _lowerCamelCase , ) -> List[Any]: """simple docstring""" __snake_case : List[str] = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) __snake_case : int = io.BytesIO(r.content ) __snake_case : List[Any] = OmegaConf.load(_lowerCamelCase ) __snake_case : Union[str, Any] = 512 __snake_case : str = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open __snake_case : Any = {} with safe_open(_lowerCamelCase , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): __snake_case : List[str] = f.get_tensor(_lowerCamelCase ) else: __snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )["""state_dict"""] # Convert the VAE model. 
__snake_case : str = create_vae_diffusers_config(_lowerCamelCase , image_size=_lowerCamelCase ) __snake_case : Any = custom_convert_ldm_vae_checkpoint(_lowerCamelCase , _lowerCamelCase ) __snake_case : Optional[Any] = AutoencoderKL(**_lowerCamelCase ) vae.load_state_dict(_lowerCamelCase ) vae.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") __UpperCamelCase = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
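At heart the conversion above is systematic state-dict key renaming (plus the attention-to-linear weight reshape done by conv_attn_to_linear). A stripped-down sketch of the renaming idea; the two-entry mapping table is illustrative only, and the real converter covers many more paths and also reverses the up-block indices:

import re

RENAMES = [
    (r"^encoder\.down\.(\d+)\.block", r"encoder.down_blocks.\1.resnets"),
    (r"^decoder\.up\.(\d+)\.block", r"decoder.up_blocks.\1.resnets"),
]

def rename_key(key: str) -> str:
    for pattern, replacement in RENAMES:
        key = re.sub(pattern, replacement, key)
    return key

assert (
    rename_key("encoder.down.0.block.1.conv1.weight")
    == "encoder.down_blocks.0.resnets.1.conv1.weight"
)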
13
'''simple docstring''' def _a ( _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : str = 0 __snake_case : Optional[int] = len(_lowerCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , _lowerCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _a ( _lowerCamelCase ) -> Tuple: """simple docstring""" if len(_lowerCamelCase ) <= 1: return arr, 0 __snake_case : Any = len(_lowerCamelCase ) // 2 __snake_case : List[str] = arr[0:mid] __snake_case : int = arr[mid:] __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : Tuple = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : str = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase ) __snake_case : str = inversion_p + inversions_q + cross_inversions return c, num_inversions def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Any = [] __snake_case : List[str] = 0 while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(_lowerCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(_lowerCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _a ( ) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __snake_case : Optional[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , _lowerCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __snake_case : Any = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) # an empty list should also have zero inversions __snake_case : List[Any] = [] __snake_case : List[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) if __name__ == "__main__": main()
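An independent oracle for the counts asserted in main() above, using bisect rather than the merge step (not part of the sample; just a second route to the same numbers). Scanning right-to-left, every already-seen element strictly smaller than the current one forms an inversion with it; the x - 1 trick assumes integer arrays, as in the sample:

from bisect import bisect_right, insort

def count_inversions_bisect(arr: list) -> int:
    seen = []        # kept sorted at all times
    inversions = 0
    for x in reversed(arr):
        inversions += bisect_right(seen, x - 1)  # elements to x's right that are < x
        insort(seen, x)
    return inversions

assert count_inversions_bisect([10, 2, 1, 5, 5, 2, 11]) == 8   # the sample's test array
assert count_inversions_bisect([1, 2, 2, 5, 5, 10, 11]) == 0   # sorted -> none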
13
1
'''simple docstring''' import math __UpperCamelCase = 10 __UpperCamelCase = 7 __UpperCamelCase = BALLS_PER_COLOUR * NUM_COLOURS def _a ( _lowerCamelCase = 20 ) -> str: """simple docstring""" __snake_case : int = math.comb(_lowerCamelCase , _lowerCamelCase ) __snake_case : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _lowerCamelCase ) __snake_case : Tuple = NUM_COLOURS * (1 - missing_colour / total) return F'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
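The closed form above follows from linearity of expectation: each of the 7 colours survives into a 20-ball draw with probability 1 - C(60, 20)/C(70, 20). A quick simulation to cross-check it; all names and the trial count are illustrative:

import random

def simulate(trials: int = 200_000) -> float:
    # 7 colours x 10 balls each, draw 20 without replacement, count distinct colours.
    urn = [colour for colour in range(7) for _ in range(10)]
    return sum(len(set(random.sample(urn, 20))) for _ in range(trials)) / trials

print(simulate())  # hovers around 6.818741802, the analytic value printed by solution(20)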
13
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
13
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyVaaControlnetPipeline lowercase__: int = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] lowercase__: int = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] lowercase__: Any = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: List[str] = False @property def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" return 32 @property def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" return 32 @property def lowercase__ ( self : int ) -> List[Any]: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> str: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : Any = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : List[str] = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : Any ) -> Optional[int]: """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : str = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : str ) -> Any: """simple docstring""" __snake_case : int = self.dummy_unet __snake_case : Tuple = self.dummy_movq __snake_case : int = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__magic_name__ , 
set_alpha_to_one=__magic_name__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__magic_name__ , ) __snake_case : Any = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple=0 ) -> int: """simple docstring""" __snake_case : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __magic_name__ ) # create hint __snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : Optional[int] = torch.manual_seed(__magic_name__ ) else: __snake_case : Tuple = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : List[str] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = """cpu""" __snake_case : int = self.get_dummy_components() __snake_case : int = self.pipeline_class(**__magic_name__ ) __snake_case : List[str] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : Union[str, Any] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : List[Any] = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Dict = image[0, -3:, -3:, -1] __snake_case : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : List[str] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" __snake_case : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) __snake_case : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) __snake_case : Dict = torch.from_numpy(np.array(__magic_name__ ) ).float() / 255.0 __snake_case : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __snake_case : Any = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Tuple = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) __snake_case : Optional[int] = 
pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : Union[str, Any] = """A robot, 4k photo""" __snake_case : Optional[int] = torch.Generator(device="""cuda""" ).manual_seed(0 ) __snake_case , __snake_case : List[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 ) __snake_case : Optional[int] = pipeline( image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , hint=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , output_type="""np""" , ) __snake_case : Optional[Any] = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
13
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running""" __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) shutil.rmtree(__magic_name__ ) __snake_case : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running""" __snake_case : Optional[int] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: __snake_case : List[Any] = chr(0xE007 ) additional_special_tokens.append(__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ ) __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE005 __snake_case : Tuple = chr(__magic_name__ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ ) __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , input_encoded + special_token_id ) __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) self.assertTrue(special_token not in decoded ) def lowercase__ ( self : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : Dict = chr(0xE005 ) __snake_case : str = chr(0xE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. 
(in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) __snake_case : Tuple = tokenizer.tokenize(__magic_name__ ) __snake_case : Any = tokenizer.tokenize(__magic_name__ ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(len(__magic_name__ ) , 1 ) self.assertEqual(token_a[0] , __magic_name__ ) self.assertEqual(token_a[0] , __magic_name__ ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: __snake_case : Optional[Any] = 0xE006 __snake_case : List[str] = chr(__magic_name__ ) __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(__magic_name__ ) tokenizer.from_pretrained(__magic_name__ ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Any = json.load(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __snake_case : Tuple = json.load(__magic_name__ ) # a special token for Canine can be defined as follows: __snake_case : Tuple = 0xE006 __snake_case : int = chr(__magic_name__ ) __snake_case : List[Any] = [new_token_a] __snake_case : Union[str, Any] = [new_token_a] with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__magic_name__ , __magic_name__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) __snake_case : Any = 0xE007 __snake_case : Any = chr(__magic_name__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )] __snake_case : Union[str, Any] = tokenizer_class.from_pretrained( __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 ) self.assertIn(__magic_name__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : List[str] = """hello world""" if self.space_between_special_tokens: __snake_case : Union[str, Any] = """[CLS] hello world [SEP]""" else: __snake_case : List[Any] = input __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(__magic_name__ , [output, output.lower()] ) def lowercase__ ( self : Tuple ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __snake_case : str = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] __snake_case : Dict = """a""" __snake_case : Tuple = ord(__magic_name__ ) for attr in attributes_list: setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , attr + """_id""" , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ ) self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] ) __snake_case : Dict = 0xE006 __snake_case : str = chr(__magic_name__ ) setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def lowercase__ ( self : Dict ) -> 
int: """simple docstring""" pass def lowercase__ ( self : str ) -> Tuple: """simple docstring""" pass def lowercase__ ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" pass def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" pass def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" pass
13
1
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __UpperCamelCase = random.Random() def _a ( _lowerCamelCase , _lowerCamelCase=1.0 , _lowerCamelCase=None , _lowerCamelCase=None ) -> Optional[int]: """simple docstring""" if rng is None: __snake_case : Union[str, Any] = global_rng __snake_case : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _A ( unittest.TestCase ): def __init__( self : str , __magic_name__ : Optional[Any] , __magic_name__ : List[Any]=7 , __magic_name__ : Union[str, Any]=4_00 , __magic_name__ : Optional[Any]=20_00 , __magic_name__ : List[str]=10 , __magic_name__ : List[str]=1_60 , __magic_name__ : str=8 , __magic_name__ : Any=0.0 , __magic_name__ : str=40_00 , __magic_name__ : Union[str, Any]=False , __magic_name__ : int=True , ) -> Any: """simple docstring""" __snake_case : str = parent __snake_case : List[str] = batch_size __snake_case : Any = min_seq_length __snake_case : Tuple = max_seq_length __snake_case : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __snake_case : Any = padding_value __snake_case : Optional[Any] = sampling_rate __snake_case : Optional[int] = return_attention_mask __snake_case : Optional[Any] = do_normalize __snake_case : Optional[int] = feature_size __snake_case : Tuple = chunk_length __snake_case : Optional[int] = hop_length def lowercase__ ( self : Any ) -> int: """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase__ ( self : Tuple , __magic_name__ : List[str]=False , __magic_name__ : List[str]=False ) -> List[Any]: """simple docstring""" def _flatten(__magic_name__ : Tuple ): return list(itertools.chain(*__magic_name__ ) ) if equal_length: __snake_case : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __snake_case : List[Any] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __snake_case : Tuple = [np.asarray(__magic_name__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _A ( __lowercase , unittest.TestCase ): lowercase__: Dict = WhisperFeatureExtractor if is_speech_available() else None def lowercase__ ( self : Union[str, Any] ) -> str: """simple docstring""" __snake_case : Optional[Any] = WhisperFeatureExtractionTester(self ) def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __snake_case : Optional[int] = 
feat_extract_first.save_pretrained(__magic_name__ )[0] check_json_file_has_correct_format(__magic_name__ ) __snake_case : List[Any] = self.feature_extraction_class.from_pretrained(__magic_name__ ) __snake_case : int = feat_extract_first.to_dict() __snake_case : str = feat_extract_second.to_dict() __snake_case : str = feat_extract_first.mel_filters __snake_case : Union[str, Any] = feat_extract_second.mel_filters self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __snake_case : List[str] = os.path.join(__magic_name__ , """feat_extract.json""" ) feat_extract_first.to_json_file(__magic_name__ ) __snake_case : Union[str, Any] = self.feature_extraction_class.from_json_file(__magic_name__ ) __snake_case : Dict = feat_extract_first.to_dict() __snake_case : List[str] = feat_extract_second.to_dict() __snake_case : int = feat_extract_first.mel_filters __snake_case : List[str] = feat_extract_second.mel_filters self.assertTrue(np.allclose(__magic_name__ , __magic_name__ ) ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __snake_case : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __snake_case : Union[str, Any] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs] # Test feature size __snake_case : Tuple = feature_extractor(__magic_name__ , padding="""max_length""" , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __snake_case : List[str] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features __snake_case : str = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) # Test batched __snake_case : Tuple = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features __snake_case : int = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
__snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __snake_case : int = np.asarray(__magic_name__ ) __snake_case : Union[str, Any] = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features __snake_case : Tuple = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) # Test truncation required __snake_case : Optional[int] = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )] __snake_case : List[str] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs] __snake_case : str = [x[: feature_extractor.n_samples] for x in speech_inputs] __snake_case : List[str] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs_truncated] __snake_case : Optional[Any] = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features __snake_case : Any = feature_extractor(__magic_name__ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(__magic_name__ , __magic_name__ ): self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def lowercase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" import torch __snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case : Optional[Any] = np.random.rand(1_00 , 32 ).astype(np.floataa ) __snake_case : Optional[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __snake_case : List[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __snake_case : List[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : Any = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech __snake_case : str = ds.sort("""id""" ).select(range(__magic_name__ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowercase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on __snake_case : List[str] = self._load_datasamples(1 ) __snake_case : List[str] = WhisperFeatureExtractor() __snake_case : int = feature_extractor(__magic_name__ , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 80, 30_00) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __magic_name__ , atol=1E-4 ) ) def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case : str = self._load_datasamples(1 )[0] __snake_case : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue __snake_case : List[str] = 
feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__magic_name__ )[0] self.assertTrue(np.all(np.mean(__magic_name__ ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(__magic_name__ ) - 1 ) < 1E-3 ) )
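A back-of-the-envelope sketch of what that last pair of assertions checks; the 1e-7 stabiliser below is an assumption mirroring common practice, not quoted from the extractor:

import numpy as np

def zero_mean_unit_var(x: np.ndarray) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

audio = np.random.rand(16_000).astype(np.float32) * 65_535  # mimic the [0, 65535] rescale above
normed = zero_mean_unit_var(audio)
print(abs(normed.mean()) < 1e-3, abs(normed.var() - 1) < 1e-3)  # True True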
13
'''simple docstring''' from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
13
1
'''simple docstring''' import math import unittest def _a ( _lowerCamelCase ) -> bool: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( number >= 0 ), "'number' must be an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(_lowerCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def lowercase__ ( self : Dict ) -> str: """simple docstring""" with self.assertRaises(__magic_name__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , ) self.assertFalse( is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
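The 6k +/- 1 trial division above works because every integer is 6k + r with r in {0,...,5}, and r in {0, 2, 3, 4} is divisible by 2 or 3, so only 6k + 1 and 6k + 5 can be prime. A self-contained sketch with readable names (the function name is illustrative):

def is_prime_6k(number: int) -> bool:
    # Same algorithm as the obfuscated _a above.
    if number < 2:
        return False
    if number < 4:
        return True  # 2 and 3
    if number % 2 == 0 or number % 3 == 0:
        return False
    i = 5
    while i * i <= number:
        if number % i == 0 or number % (i + 2) == 0:
            return False
        i += 6
    return True

assert all(n % 6 in (1, 5) for n in range(5, 1000) if is_prime_6k(n))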
13
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class _A ( __lowercase ): lowercase__: str = '''codegen''' lowercase__: Optional[int] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int: """simple docstring""" __snake_case : List[str] = vocab_size __snake_case : Union[str, Any] = n_ctx __snake_case : int = n_positions __snake_case : str = n_embd __snake_case : Dict = n_layer __snake_case : List[Any] = n_head __snake_case : Any = n_inner __snake_case : str = rotary_dim __snake_case : List[str] = activation_function __snake_case : Tuple = resid_pdrop __snake_case : Dict = embd_pdrop __snake_case : int = attn_pdrop __snake_case : Tuple = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : Union[str, Any] = eos_token_id super().__init__( bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ ) class _A ( __lowercase ): def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , 
__magic_name__ : bool = False , ) -> Tuple: """simple docstring""" super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ ) if not getattr(self._config , """pad_token_id""" , __magic_name__ ): # TODO: how to do that better? __snake_case : List[str] = 0 @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" ) __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowercase__ ( self : Tuple ) -> int: """simple docstring""" return self._config.n_layer @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.n_head def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : List[str] = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers ) ] __snake_case : Optional[int] = common_inputs["""attention_mask"""] if self.use_past: __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype __snake_case : Optional[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
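For orientation, the cache tensors built in generate_dummy_inputs above have shape (batch, n_head, seqlen + 2, n_embd // n_head). A quick sanity check with assumed CodeGen-350M dimensions (n_head=16, n_embd=1024 per its published config; treat these as illustrative):

batch, seqlen = 2, 7
n_head, n_embd = 16, 1024  # assumed CodeGen-350M values
past_shape = (batch, n_head, seqlen + 2, n_embd // n_head)
print(past_shape)          # (2, 16, 9, 64)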
13
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __snake_case : str = DisjunctiveConstraint(__magic_name__ ) self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(__magic_name__ ) # fails here def lowercase__ ( self : int ) -> Optional[Any]: """simple docstring""" __snake_case : Any = [[1, 2, 3], [1, 2, 4]] __snake_case : str = DisjunctiveConstraint(__magic_name__ ) __snake_case , __snake_case , __snake_case : Dict = dc.update(1 ) __snake_case : Optional[int] = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __snake_case , __snake_case , __snake_case : int = dc.update(2 ) __snake_case : Dict = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __snake_case , __snake_case , __snake_case : Union[str, Any] = dc.update(3 ) __snake_case : int = stepped is True and completed is True and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def lowercase__ ( self : Union[str, Any] ) -> int: """simple docstring""" __snake_case : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __snake_case : Union[str, Any] = DisjunctiveConstraint(__magic_name__ ) __snake_case , __snake_case , __snake_case : List[str] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __snake_case , __snake_case , __snake_case : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __snake_case , __snake_case , __snake_case : Any = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __snake_case , __snake_case , __snake_case : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __snake_case , __snake_case , __snake_case : Union[str, Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __snake_case , __snake_case , __snake_case : Tuple = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __snake_case , __snake_case , __snake_case : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
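Distilled from the assertions above, the stateful constraint API reads like this; a short sketch assuming transformers is installed:

from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.completed, dc.current_seq)  # True [1, 2, 4]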
13
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( __lowercase , unittest.TestCase ): lowercase__: int = KandinskyImgaImgPipeline lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image'''] lowercase__: int = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowercase__: List[Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowercase__: Any = False @property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return 32 @property def lowercase__ ( self : str ) -> str: """simple docstring""" return 32 @property def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" return self.time_input_dim @property def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return 1_00 @property def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __snake_case : Tuple = MultilingualCLIP(__magic_name__ ) __snake_case : Optional[Any] = text_encoder.eval() return text_encoder @property def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ ) return model @property def lowercase__ ( self : str ) -> Dict: """simple docstring""" return { "block_out_channels": [32, 64], 
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase__ ( self : Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : Tuple = self.dummy_text_encoder __snake_case : Dict = self.dummy_tokenizer __snake_case : Dict = self.dummy_unet __snake_case : int = self.dummy_movq __snake_case : List[Any] = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case : Dict = DDIMScheduler(**__magic_name__ ) __snake_case : Any = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str: """simple docstring""" __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(__magic_name__ ).startswith("""mps""" ): __snake_case : str = torch.manual_seed(__magic_name__ ) else: __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) __snake_case : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : Dict = """cpu""" __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[str] = self.pipeline_class(**__magic_name__ ) __snake_case : Optional[Any] = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) ) __snake_case : List[str] = output.images __snake_case : Any = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] __snake_case : Optional[int] = image[0, -3:, -3:, -1] __snake_case : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' 
expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): def lowercase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __snake_case : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __snake_case : List[Any] = """A red cartoon frog, 4k""" __snake_case : str = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __snake_case : Any = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __snake_case , __snake_case : Optional[Any] = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __snake_case : List[str] = pipeline( __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) __snake_case : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
13
1
'''simple docstring''' from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _A ( __lowercase ): def lowercase__ ( self : str , __magic_name__ : float ) -> float: """simple docstring""" return 0.0 def _a ( _lowerCamelCase , _lowerCamelCase ) -> tuple[int | float, int | float]: """simple docstring""" __snake_case : Optional[Any] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) __snake_case : List[Any] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def _a ( _lowerCamelCase , _lowerCamelCase ) -> None: """simple docstring""" __snake_case : Optional[int] = 512 __snake_case : Union[str, Any] = [1] + [0] * (size - 1) __snake_case : Optional[Any] = [filter_type.process(_lowerCamelCase ) for item in inputs] __snake_case : Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler __snake_case : List[Any] = np.abs(np.fft.fft(_lowerCamelCase ) ) __snake_case : List[Any] = 20 * np.logaa(_lowerCamelCase ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) # Display within reasonable bounds __snake_case : Optional[int] = get_bounds(_lowerCamelCase , _lowerCamelCase ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("""Gain (dB)""" ) plt.plot(_lowerCamelCase ) plt.show() def _a ( _lowerCamelCase , _lowerCamelCase ) -> None: """simple docstring""" __snake_case : Optional[Any] = 512 __snake_case : Optional[Any] = [1] + [0] * (size - 1) __snake_case : List[Any] = [filter_type.process(_lowerCamelCase ) for item in inputs] __snake_case : List[str] = [0] * (samplerate - size) # zero-padding outputs += filler __snake_case : Any = np.angle(np.fft.fft(_lowerCamelCase ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("""Phase shift (Radians)""" ) plt.plot(np.unwrap(_lowerCamelCase , -2 * pi ) ) plt.show()
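A minimal sketch of a filter satisfying the process() protocol above; fed to the response plotters (obfuscated here as _a, with names assumed below) it would trace a flat 0 dB magnitude and zero phase:

class AllPassIdentity:
    """Pass-through filter: output equals input, gain 1 at every frequency."""

    def process(self, sample: float) -> float:
        return sample

# show_frequency_response(AllPassIdentity(), 48000)  # illustrative call; plotter names assumed
# show_phase_response(AllPassIdentity(), 48000)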
13
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __UpperCamelCase = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __UpperCamelCase = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } class _A ( __lowercase ): lowercase__: Any = VOCAB_FILES_NAMES lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask'''] lowercase__: List[str] = BartTokenizer def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]: """simple docstring""" super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , 
errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , ) __snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) ) __snake_case : str = add_prefix_space __snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ ) __snake_case : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __snake_case : Any = """post_processor""" __snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) if tokenizer_component_instance: __snake_case : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __snake_case : Tuple = tuple(state["""sep"""] ) if "cls" in state: __snake_case : int = tuple(state["""cls"""] ) __snake_case : Optional[int] = False if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: __snake_case : Optional[Any] = add_prefix_space __snake_case : List[str] = True if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets: __snake_case : Optional[int] = trim_offsets __snake_case : Any = True if changes_to_apply: __snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) ) __snake_case : List[Any] = component_class(**__magic_name__ ) setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) @property def lowercase__ ( self : List[Any] ) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value __snake_case : Union[str, Any] = value def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding: """simple docstring""" __snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding: """simple docstring""" __snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__magic_name__ , **__magic_name__ ) def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) 
return tuple(__magic_name__ ) def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Optional[int] = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
13
1
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __UpperCamelCase = "src/diffusers" # Matches is_xxx_available() __UpperCamelCase = re.compile(R"is\_([a-z_]*)_available\(\)") # Matches from xxx import bla __UpperCamelCase = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") __UpperCamelCase = "\n{0} = None\n" __UpperCamelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n" __UpperCamelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" def _a ( _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Any = _re_backend.findall(_lowerCamelCase ) if len(_lowerCamelCase ) == 0: return None return "_and_".join(_lowerCamelCase ) def _a ( ) -> Union[str, Any]: """simple docstring""" with open(os.path.join(_lowerCamelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __snake_case : int = f.readlines() # Get to the point we do the actual imports for type checking __snake_case : Any = 0 __snake_case : Any = {} # Go through the end of the file while line_index < len(_lowerCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __snake_case : Any = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 __snake_case : Tuple = [] # Until we unindent, add backend objects to the list while line_index < len(_lowerCamelCase ) and len(lines[line_index] ) > 1: __snake_case : str = lines[line_index] __snake_case : Optional[Any] = _re_single_line_import.search(_lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(_lowerCamelCase ) > 0: __snake_case : str = objects else: line_index += 1 return backend_specific_objects def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(_lowerCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(_lowerCamelCase , _lowerCamelCase ) else: return DUMMY_CLASS.format(_lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase=None ) -> str: """simple docstring""" if backend_specific_objects is None: __snake_case : Union[str, Any] = read_init() # For special correspondence backend to module name as used in the function requires_modulename __snake_case : List[Any] = {} for backend, objects in backend_specific_objects.items(): __snake_case : Optional[Any] = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" __snake_case : Dict = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(_lowerCamelCase , _lowerCamelCase ) for o in objects] ) __snake_case : Any = dummy_file return dummy_files def _a ( _lowerCamelCase=False ) -> List[str]: """simple docstring""" __snake_case : Dict = create_dummy_files() # For special correspondence backend to shortcut as used in 
utils/dummy_xxx_objects.py __snake_case : Tuple = {"""torch""": """pt"""} # Locate actual dummy modules and read their content. __snake_case : str = os.path.join(_lowerCamelCase , """utils""" ) __snake_case : Dict = { backend: os.path.join(_lowerCamelCase , F'''dummy_{short_names.get(_lowerCamelCase , _lowerCamelCase )}_objects.py''' ) for backend in dummy_files.keys() } __snake_case : List[str] = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(_lowerCamelCase ): with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __snake_case : Union[str, Any] = f.read() else: __snake_case : Optional[int] = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(_lowerCamelCase , _lowerCamelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(_lowerCamelCase , _lowerCamelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __UpperCamelCase = parser.parse_args() check_dummies(args.fix_and_overwrite)
13
'''simple docstring''' import os import numpy import onnx def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Optional[int] = a.name __snake_case : Dict = b.name __snake_case : Optional[int] = """""" __snake_case : int = """""" __snake_case : Any = a == b __snake_case : List[Any] = name_a __snake_case : List[str] = name_b return res def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_lowerCamelCase , _lowerCamelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) _graph_replace_input_with(node_proto.attribute[1].g , _lowerCamelCase , _lowerCamelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : Dict = list(model.graph.initializer ) __snake_case : List[Any] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __snake_case : Tuple = inits[i].name __snake_case : Tuple = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _lowerCamelCase , _lowerCamelCase ) def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : str = os.path.dirname(_lowerCamelCase ) __snake_case : Dict = os.path.basename(_lowerCamelCase ) __snake_case : Union[str, Any] = onnx.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) ) __snake_case : Dict = list(model.graph.initializer ) __snake_case : Optional[int] = set() __snake_case : Optional[Any] = {} __snake_case : Tuple = [] __snake_case : List[Any] = 0 for i in range(len(_lowerCamelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(_lowerCamelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_lowerCamelCase ) dup_set.add(_lowerCamelCase ) __snake_case : List[Any] = inits[j].data_type __snake_case : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , _lowerCamelCase ) total_reduced_size += mem_size __snake_case : Any = inits[i].name __snake_case : Any = inits[j].name if name_i in dup_map: dup_map[name_i].append(_lowerCamelCase ) else: __snake_case : Dict = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) __snake_case : int = sorted(_lowerCamelCase ) _remove_dup_initializers_from_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) __snake_case : str = """optimized_""" + model_file_name __snake_case : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase ) onnx.save(_lowerCamelCase , _lowerCamelCase ) return new_model
13
1
'''simple docstring''' from math import ceil def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : List[str] = list(range(0 , _lowerCamelCase ) ) __snake_case : Dict = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check __snake_case : Dict = [] for i in device_map_blocks: if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(_lowerCamelCase ) # Missing blocks __snake_case : Union[str, Any] = [i for i in blocks if i not in device_map_blocks] __snake_case : Tuple = [i for i in device_map_blocks if i not in blocks] if len(_lowerCamelCase ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(_lowerCamelCase ) ) if len(_lowerCamelCase ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(_lowerCamelCase ) ) if len(_lowerCamelCase ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(_lowerCamelCase ) ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = list(range(_lowerCamelCase ) ) __snake_case : int = int(ceil(n_layers / len(_lowerCamelCase ) ) ) __snake_case : Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , _lowerCamelCase , _lowerCamelCase )] return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
13
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase = ["small", "medium", "large"] __UpperCamelCase = "lm_head.decoder.weight" __UpperCamelCase = "lm_head.weight" def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : Optional[int] = torch.load(_lowerCamelCase ) __snake_case : Optional[int] = d.pop(_lowerCamelCase ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--dialogpt_path", default=".", type=str) __UpperCamelCase = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""") __UpperCamelCase = f"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
13
1
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel __UpperCamelCase = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } __UpperCamelCase = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> Tuple: """simple docstring""" __snake_case , __snake_case : Tuple = create_model( """HTSAT-tiny""" , """roberta""" , _lowerCamelCase , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_lowerCamelCase , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def _a ( _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = {} __snake_case : Any = R""".*sequential.(\d+).*""" __snake_case : Union[str, Any] = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __snake_case : List[str] = key.replace(_lowerCamelCase , _lowerCamelCase ) if re.match(_lowerCamelCase , _lowerCamelCase ): # replace sequential layers with list __snake_case : Optional[int] = re.match(_lowerCamelCase , _lowerCamelCase ).group(1 ) __snake_case : List[str] = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_lowerCamelCase )//3}.linear.''' ) elif re.match(_lowerCamelCase , _lowerCamelCase ): __snake_case : Dict = int(re.match(_lowerCamelCase , _lowerCamelCase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
__snake_case : Tuple = 1 if projecton_layer == 0 else 2 __snake_case : List[Any] = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' ) if "audio" in key and "qkv" in key: # split qkv into query key and value __snake_case : Any = value __snake_case : Tuple = mixed_qkv.size(0 ) // 3 __snake_case : Dict = mixed_qkv[:qkv_dim] __snake_case : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] __snake_case : Union[str, Any] = mixed_qkv[qkv_dim * 2 :] __snake_case : Union[str, Any] = query_layer __snake_case : Dict = key_layer __snake_case : Optional[int] = value_layer else: __snake_case : Dict = value return model_state_dict def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> List[str]: """simple docstring""" __snake_case , __snake_case : Dict = init_clap(_lowerCamelCase , enable_fusion=_lowerCamelCase ) clap_model.eval() __snake_case : int = clap_model.state_dict() __snake_case : Dict = rename_state_dict(_lowerCamelCase ) __snake_case : Any = ClapConfig() __snake_case : Any = enable_fusion __snake_case : Optional[Any] = ClapModel(_lowerCamelCase ) # ignore the spectrogram embedding layer model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) transformers_config.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") __UpperCamelCase = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
13
'''simple docstring''' __UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def _a ( ) -> None: """simple docstring""" __snake_case : Dict = input("""Enter message: """ ) __snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ ) __snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): __snake_case : Any = """encrypt""" __snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase ) elif mode.lower().startswith("""d""" ): __snake_case : Optional[int] = """decrypt""" __snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase ) print(F'''\n{mode.title()}ed message:''' ) print(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" __snake_case : str = [] __snake_case : Dict = 0 __snake_case : Optional[int] = key.upper() for symbol in message: __snake_case : Any = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(_lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(_lowerCamelCase ): __snake_case : Tuple = 0 else: translated.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) if __name__ == "__main__": main()
13
1
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _A : def __init__( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[Any]=13 , __magic_name__ : Dict=7 , __magic_name__ : int=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=True , __magic_name__ : str=99 , __magic_name__ : List[str]=64 , __magic_name__ : str=32 , __magic_name__ : List[str]=5 , __magic_name__ : Dict=4 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : List[str]=16 , __magic_name__ : List[Any]=2 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]=3 , __magic_name__ : int=4 , __magic_name__ : str=None , ) -> Optional[int]: """simple docstring""" __snake_case : str = parent __snake_case : Optional[Any] = batch_size __snake_case : Optional[Any] = seq_length __snake_case : Optional[Any] = is_training __snake_case : List[str] = use_input_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : Union[str, Any] = vocab_size __snake_case : str = hidden_size __snake_case : Any = embedding_size __snake_case : Tuple = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Any = intermediate_size __snake_case : List[Any] = hidden_act __snake_case : str = hidden_dropout_prob __snake_case : int = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : Union[str, Any] = type_vocab_size __snake_case : Optional[Any] = type_sequence_label_size __snake_case : Any = initializer_range __snake_case : List[str] = num_labels __snake_case : Optional[Any] = num_choices __snake_case : Dict = scope def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_input_mask: __snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Union[str, Any] = None if self.use_token_type_ids: __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : int = None __snake_case : List[str] = None __snake_case : Optional[int] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, 
input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Any ) -> Any: """simple docstring""" return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def lowercase__ ( self : str , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ) -> Any: """simple docstring""" __snake_case : Any = MegatronBertModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Dict = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) __snake_case : Dict = model(__magic_name__ , token_type_ids=__magic_name__ ) __snake_case : str = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : Optional[int] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = MegatronBertForMaskedLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Tuple = MegatronBertForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Any: """simple docstring""" __snake_case : Dict = MegatronBertForNextSentencePrediction(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Dict = model( __magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : 
Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : List[Any] = MegatronBertForPreTraining(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : str = model( __magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , next_sentence_label=__magic_name__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : str ) -> Dict: """simple docstring""" __snake_case : Any = MegatronBertForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model( __magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = self.num_labels __snake_case : Optional[int] = MegatronBertForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = self.num_labels __snake_case : Optional[Any] = MegatronBertForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] ) -> int: """simple docstring""" __snake_case : List[str] = self.num_choices __snake_case : str = MegatronBertForMultipleChoice(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[Any] = model( __magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ 
( self : List[Any] ) -> Dict: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Optional[int] = config_and_inputs __snake_case : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Tuple = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) lowercase__: Any = ( { '''feature-extraction''': MegatronBertModel, '''fill-mask''': MegatronBertForMaskedLM, '''question-answering''': MegatronBertForQuestionAnswering, '''text-classification''': MegatronBertForSequenceClassification, '''text-generation''': MegatronBertForCausalLM, '''token-classification''': MegatronBertForTokenClassification, '''zero-shot''': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) lowercase__: Tuple = True # test_resize_embeddings = False lowercase__: Optional[int] = False def lowercase__ ( self : List[str] , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[Any]=False ) -> Dict: """simple docstring""" __snake_case : List[Any] = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__magic_name__ ) __snake_case : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Any: """simple docstring""" __snake_case : Dict = MegatronBertModelTester(self ) __snake_case : Optional[int] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : int ) -> Optional[Any]: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__magic_name__ ) def lowercase__ ( self : List[Any] ) -> str: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__magic_name__ ) def lowercase__ ( self : int ) -> str: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__magic_name__ ) def lowercase__ ( self : Dict ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__magic_name__ ) def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : str = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__magic_name__ ) def lowercase__ ( self : List[Any] ) -> int: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__magic_name__ ) def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__magic_name__ ) def _a ( _lowerCamelCase ) -> str: """simple docstring""" return torch.tensor( _lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , ) __UpperCamelCase = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow @unittest.skip("""Model is not available.""" ) def lowercase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case : str = """nvidia/megatron-bert-uncased-345m""" if "MYDIR" in os.environ: __snake_case : str = os.path.join(os.environ["""MYDIR"""] , __magic_name__ ) __snake_case : Dict = MegatronBertModel.from_pretrained(__magic_name__ ) model.to(__magic_name__ ) model.half() __snake_case : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): __snake_case : List[Any] = model(__magic_name__ )[0] __snake_case : Union[str, Any] = torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , __magic_name__ ) __snake_case : List[str] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): __snake_case : int = output[0, ii, jj] __snake_case : Dict = expected[3 * ii + jj] __snake_case : Tuple = """ii={} jj={} a={} b={}""".format(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) self.assertTrue(math.isclose(__magic_name__ , __magic_name__ , rel_tol=__magic_name__ , abs_tol=__magic_name__ ) , msg=__magic_name__ )
13
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case : Union[str, Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : Tuple = value elif weight_type == "bias": __snake_case : str = value else: __snake_case : List[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" __snake_case : Tuple = [] __snake_case : List[Any] = fairseq_model.state_dict() __snake_case : int = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : Any = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Optional[Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __snake_case : Dict = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_lowerCamelCase )[0].split(""".""" )[-2] __snake_case : Optional[int] = mapped_key.replace("""*""" , _lowerCamelCase ) if "weight_g" in name: __snake_case : Dict = """weight_g""" elif "weight_v" in name: __snake_case : List[str] = """weight_v""" elif "weight" in name: __snake_case : str = """weight""" elif "bias" in name: __snake_case : int = """bias""" else: __snake_case : int = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Dict = full_name.split("""conv_layers.""" )[-1] __snake_case : Optional[int] = name.split(""".""" ) __snake_case : Dict = int(items[0] ) __snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case : List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : List[str] = SEWConfig() if is_finetuned: __snake_case : List[Any] = model.wav_encoder.wav_model.cfg else: __snake_case : Optional[Any] = model.cfg __snake_case : Tuple = fs_config.conv_bias __snake_case : List[Any] = eval(fs_config.conv_feature_layers ) __snake_case : List[Any] = [x[0] for x in conv_layers] __snake_case : Dict = [x[1] for x in conv_layers] __snake_case : Tuple = [x[2] for x in conv_layers] __snake_case : List[str] = """gelu""" __snake_case : Dict = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" __snake_case : Optional[int] = 0.0 __snake_case : Optional[Any] = fs_config.activation_fn.name __snake_case : Dict = fs_config.encoder_embed_dim __snake_case : Dict = 0.02 __snake_case : Any = fs_config.encoder_ffn_embed_dim __snake_case : Tuple = 1E-5 __snake_case : Dict = fs_config.encoder_layerdrop __snake_case : Any = fs_config.encoder_attention_heads __snake_case : int = fs_config.conv_pos_groups __snake_case : Tuple = fs_config.conv_pos __snake_case : Optional[int] = len(_lowerCamelCase ) __snake_case : int = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : Union[str, Any] = model.cfg __snake_case : Tuple = fs_config.final_dropout __snake_case : Tuple = fs_config.layerdrop __snake_case : Any = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : List[Any] = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : str = fs_config.mask_channel_length __snake_case : Any = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : str = fs_config.mask_prob __snake_case : str = """Wav2Vec2FeatureExtractor""" __snake_case : Dict = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> int: """simple docstring""" if is_finetuned: __snake_case , __snake_case , __snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : Optional[Any] = SEWConfig.from_pretrained(_lowerCamelCase ) else: __snake_case : int = convert_config(model[0] , _lowerCamelCase ) __snake_case : Dict = model[0].eval() __snake_case : Optional[Any] = True if config.feat_extract_norm == """layer""" else False __snake_case : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , 
do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if is_finetuned: if dict_path: __snake_case : str = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Union[str, Any] = target_dict.pad_index __snake_case : Optional[Any] = target_dict.bos_index __snake_case : Tuple = target_dict.pad_index __snake_case : List[str] = target_dict.bos_index __snake_case : Optional[Any] = target_dict.eos_index __snake_case : List[str] = len(target_dict.symbols ) __snake_case : Optional[Any] = os.path.join(_lowerCamelCase , """vocab.json""" ) if not os.path.isdir(_lowerCamelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _lowerCamelCase ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case : List[str] = SEWForCTC(_lowerCamelCase ) else: __snake_case : List[str] = SEWModel(_lowerCamelCase ) feature_extractor.save_pretrained(_lowerCamelCase ) recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __UpperCamelCase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
13
1
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( "stable diffusion controlnet", "0.22.0", "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.", standard_warn=False, stacklevel=3, )
13
'''simple docstring''' def _a ( _lowerCamelCase ) -> bool: """simple docstring""" __snake_case : Optional[int] = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _a ( _lowerCamelCase = 5000 ) -> int: """simple docstring""" __snake_case : int = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )] for i, pentagonal_i in enumerate(_lowerCamelCase ): for j in range(_lowerCamelCase , len(_lowerCamelCase ) ): __snake_case : Optional[int] = pentagonal_nums[j] __snake_case : str = pentagonal_i + pentagonal_j __snake_case : List[Any] = pentagonal_j - pentagonal_i if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ): return b return -1 if __name__ == "__main__": print(f"""{solution() = }""")
13
1
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "spiece.model"} __UpperCamelCase = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", } } __UpperCamelCase = { "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = 2 __UpperCamelCase = 3 __UpperCamelCase = 4 class _A ( __lowercase ): lowercase__: List[str] = VOCAB_FILES_NAMES lowercase__: int = PRETRAINED_VOCAB_FILES_MAP lowercase__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: int = '''left''' def __init__( self : int , __magic_name__ : str , __magic_name__ : int=False , __magic_name__ : int=True , __magic_name__ : List[str]=False , __magic_name__ : Any="<s>" , __magic_name__ : List[str]="</s>" , __magic_name__ : int="<unk>" , __magic_name__ : Dict="<sep>" , __magic_name__ : str="<pad>" , __magic_name__ : Tuple="<cls>" , __magic_name__ : Optional[Any]="<mask>" , __magic_name__ : List[str]=["<eop>", "<eod>"] , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Dict , ) -> None: """simple docstring""" __snake_case : Tuple = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token __snake_case : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , additional_special_tokens=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) __snake_case : Tuple = 3 __snake_case : int = do_lower_case __snake_case : Optional[int] = remove_space __snake_case : Optional[int] = keep_accents __snake_case : List[Any] = vocab_file __snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) @property def lowercase__ ( self : Tuple ) -> str: """simple docstring""" return len(self.sp_model ) def lowercase__ ( self : List[Any] ) -> Any: """simple docstring""" __snake_case : Any = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.__dict__.copy() __snake_case : List[Any] = None return state def __setstate__( self : Union[str, Any] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" __snake_case : Union[str, Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __snake_case : int = {} __snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self : Dict , __magic_name__ : str ) -> List[str]: """simple docstring""" if self.remove_space: __snake_case : Optional[int] = """ """.join(inputs.strip().split() ) 
else: __snake_case : List[str] = inputs __snake_case : Optional[Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: __snake_case : int = unicodedata.normalize("""NFKD""" , __magic_name__ ) __snake_case : int = """""".join([c for c in outputs if not unicodedata.combining(__magic_name__ )] ) if self.do_lower_case: __snake_case : Optional[Any] = outputs.lower() return outputs def lowercase__ ( self : List[Any] , __magic_name__ : str ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.preprocess_text(__magic_name__ ) __snake_case : Dict = self.sp_model.encode(__magic_name__ , out_type=__magic_name__ ) __snake_case : Optional[Any] = [] for piece in pieces: if len(__magic_name__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): __snake_case : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(__magic_name__ , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __snake_case : List[str] = cur_pieces[1:] else: __snake_case : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__magic_name__ ) else: new_pieces.append(__magic_name__ ) return new_pieces def lowercase__ ( self : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" return self.sp_model.PieceToId(__magic_name__ ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] ) -> int: """simple docstring""" return self.sp_model.IdToPiece(__magic_name__ ) def lowercase__ ( self : str , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = """""".join(__magic_name__ ).replace(__magic_name__ , """ """ ).strip() return out_string def lowercase__ ( self : List[Any] , __magic_name__ : List[int] , __magic_name__ : bool = False , __magic_name__ : bool = None , __magic_name__ : bool = True , **__magic_name__ : Optional[int] , ) -> str: """simple docstring""" __snake_case : Union[str, Any] = kwargs.pop("""use_source_tokenizer""" , __magic_name__ ) __snake_case : Any = self.convert_ids_to_tokens(__magic_name__ , skip_special_tokens=__magic_name__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __snake_case : Dict = [] __snake_case : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__magic_name__ ) ) __snake_case : Optional[int] = [] sub_texts.append(__magic_name__ ) else: current_sub_text.append(__magic_name__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__magic_name__ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens __snake_case : Tuple = """""".join(__magic_name__ ) __snake_case : Optional[Any] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __snake_case : Dict = self.clean_up_tokenization(__magic_name__ ) return clean_text else: return text def lowercase__ ( self : List[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Union[str, Any] = [self.sep_token_id] __snake_case : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) if token_ids_a is not None: return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1, 1] return ([0] * len(__magic_name__ )) + [1, 1] def lowercase__ ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Any = [self.sep_token_id] __snake_case : Optional[int] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def lowercase__ ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case : int = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __magic_name__ ) elif not os.path.isfile(self.vocab_file ): with open(__magic_name__ , """wb""" ) as fi: __snake_case : Dict = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (out_vocab_file,)
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> str: """simple docstring""" __snake_case : List[Any] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __snake_case : int = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __snake_case : Optional[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above __snake_case : str = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) __snake_case : Dict = output[output != -float("""inf""" )] __snake_case : Optional[Any] = tf.cast( tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @require_tf class _A ( unittest.TestCase , __lowercase ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): lowercase__: Tuple = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" __snake_case : str = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Optional[int] = 2 __snake_case : str = 2 class _A ( tf.Module ): def __init__( self : str , __magic_name__ : Optional[int] ) -> Tuple: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Dict = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict: """simple docstring""" __snake_case : Tuple = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : int = [[2, 0], [1_02, 1_03]] __snake_case : Tuple = [[1, 0], [1, 1]] __snake_case : Union[str, Any] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for batch_size in range(1 , len(__magic_name__ ) + 1 ): __snake_case : Union[str, Any] = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } __snake_case : Tuple = serving_func(**__magic_name__ )["""sequences"""] __snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case 
: Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : Dict = 1 __snake_case : int = 2 class _A ( tf.Module ): def __init__( self : Tuple , __magic_name__ : List[str] ) -> int: """simple docstring""" super(__magic_name__ , self ).__init__() __snake_case : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=__magic_name__ , ) def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.model.generate( input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , ) return {"sequences": outputs["sequences"]} __snake_case : Union[str, Any] = [[2], [1_02, 1_03]] __snake_case : Tuple = [[1], [1, 1]] __snake_case : List[str] = DummyModel(model=__magic_name__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} ) __snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""] for input_row in range(len(__magic_name__ ) ): __snake_case : Tuple = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } __snake_case : str = serving_func(**__magic_name__ )["""sequences"""] __snake_case : Union[str, Any] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ ) tf.debugging.assert_equal(__magic_name__ , __magic_name__ ) @slow @require_tensorflow_text def lowercase__ ( self : Dict ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ ) class _A ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ) -> int: """simple docstring""" super().__init__() __snake_case : Any = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() ) __snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def lowercase__ ( self : Any , __magic_name__ : List[Any] , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.tokenizer.tokenize(__magic_name__ ) __snake_case , __snake_case : List[Any] = text.pad_model_inputs( __magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) __snake_case : Optional[int] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ ) return self.tokenizer.detokenize(__magic_name__ ) __snake_case : int = CompleteSentenceTransformer() __snake_case : Union[str, Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" ) __snake_case : Tuple = complete_model(__magic_name__ ) __snake_case : Optional[Any] = tf.keras.Model(__magic_name__ , __magic_name__ ) keras_model.save(__magic_name__ ) def lowercase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __snake_case : Dict = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 10, """temperature""": 0.7, } __snake_case : str = 14 __snake_case : str = 
AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : int = """Hello, my dog is cute and""" __snake_case : Any = tokenizer(__magic_name__ , return_tensors="""tf""" ) __snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __snake_case : List[Any] = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : int = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) __snake_case : Dict = [6_38, 1_98] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) __snake_case : Optional[int] = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowercase__ ( self : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : str = """Hugging Face is a technology company based in New York and Paris.""" __snake_case : str = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids __snake_case : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : int = bart_model.generate(__magic_name__ ).numpy() class _A ( __lowercase ): def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : int=None , **__magic_name__ : int ) -> Optional[Any]: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) __snake_case : Optional[Any] = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) ) class _A ( bart_model.model.encoder.__class__ ): def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Dict: """simple docstring""" return super().call(__magic_name__ , **__magic_name__ ) __snake_case : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) __snake_case : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __snake_case : Dict = bart_model.generate(__magic_name__ ).numpy() with self.assertRaises(__magic_name__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(__magic_name__ , foo="""bar""" )
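# Hedged sketch (added, not part of the original tests): the filtering rule
# that tf_top_k_top_p_filtering is exercised against above, restated for a
# single 1-D logits vector in plain numpy. Names are mine; the real function
# also handles batches, `min_tokens_to_keep`, and TF tensors.
import numpy as np

def top_k_top_p_sketch(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    filtered = logits.astype(float)
    # top-k: mask everything below the k-th largest logit
    kth_largest = np.sort(filtered)[-top_k]
    filtered[filtered < kth_largest] = -np.inf
    # top-p: walk tokens in descending order of probability and drop a token
    # once the probability mass *before* it already exceeds top_p (so the
    # token that crosses the threshold is still kept, like the HF rule)
    order = np.argsort(filtered)[::-1]
    shifted = filtered[order] - filtered[order][0]
    probs = np.exp(shifted) / np.exp(shifted).sum()
    drop = np.cumsum(probs) - probs > top_p
    filtered[order[drop]] = -np.inf
    return filtered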
'''simple docstring'''

import math


def proth(number: int) -> int:
    """Return the ``number``-th Proth number (1-indexed)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # blocks of Proth numbers double in length, so compute how many
        # blocks are needed to reach the requested index
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
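# Hedged reference values (added): the first Proth numbers this function
# returns, matching OEIS A080075 (numbers of the form k * 2**n + 1, odd k < 2**n):
#   [proth(i) for i in range(1, 9)] == [3, 5, 9, 13, 17, 25, 33, 41]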
'''simple docstring'''

from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row, backtracking on any collision."""
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of
    # the current board (possible_board)
    if row == n:
        # Convert possible_board, e.g. [1, 3, 0, 2], into its printable form:
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Try every column in this row to enumerate all placements
    for col in range(n):
        # First check that the current board (possible_board) does not already
        # contain this column value; if it does, two queens share a column
        # (a vertical collision). Then apply the two diagonal formulas:
        #
        # 45º:  y - x = b, i.e. row - col = b
        # 135º: y + x = b, i.e. row + col = b
        #
        # and verify that neither result already exists in
        # diagonal_right_collisions / diagonal_left_collisions.
        #
        # If any check hits, there is a collision, so we skip to the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # No collision: recurse with the updated state
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
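# Hedged worked example (added): for n = 4 the search finds exactly two
# placements; as column indices per row they are [1, 3, 0, 2] and [2, 0, 3, 1].
if __name__ == "__main__":
    example_boards: list[list[str]] = []
    depth_first_search([], [], [], example_boards, 4)
    assert len(example_boards) == 2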
'''simple docstring'''

from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale with the ITU-R 601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of ``image`` by ``kernel``."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
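# Hedged usage sketch (added, not part of the original file): dilation on a
# tiny synthetic image, so the behaviour is visible without the lena.jpg
# fixture. A lone foreground pixel grows into the shape of the cross kernel.
if __name__ == "__main__":
    tiny = np.zeros((5, 5))
    tiny[2, 2] = 1
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    # the single pixel at (2, 2) becomes a plus sign centred on (2, 2)
    print(dilation(tiny, cross))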
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCamelCase = logging.getLogger(__name__) class _A ( __lowercase ): def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" super().__init__( __magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , ) __snake_case : List[str] = None def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]: """simple docstring""" logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __snake_case : List[Any] = self._infer_socket_ifname() # avoid clash with the NCCL port __snake_case : List[str] = str(distributed_port + 1 ) __snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowercase__ ( self : int ) -> int: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ ) dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group ) return target_tensor def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : int = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ ) return ifname def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): __snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ ) # distributed training __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group ) # gather logic __snake_case : Tuple = None if self._is_main(): __snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )] dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group ) # scatter logic __snake_case : Optional[int] = question_hidden_states.shape[0] __snake_case : Optional[Any] = [] __snake_case : Any = [] if self._is_main(): assert len(__magic_name__ ) == world_size __snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ ) __snake_case , __snake_case : Tuple = 
torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ ) __snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa ) __snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
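# Hedged sketch (added): the distributed path above chunks the gathered query
# matrix into one piece per worker before scattering results back. A minimal
# stand-in for that chunking step in plain torch (function name is mine, and
# this is a simplification of what the retriever's chunk helper does):
import torch

def chunk_for_workers(t: torch.Tensor, world_size: int) -> list:
    # one chunk of rows per worker; dim 0 indexes the queries
    return list(torch.chunk(t, world_size, dim=0))

# e.g. 8 queries across 4 workers -> 4 tensors of shape (2, 16)
# chunks = chunk_for_workers(torch.randn(8, 16), 4)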
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=__lowercase ) class _A ( __lowercase ): lowercase__: str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) lowercase__: ClassVar[Features] = Features({'''audio''': Audio()} ) lowercase__: ClassVar[Features] = Features({'''transcription''': Value('''string''' )} ) lowercase__: str = "audio" lowercase__: str = "transcription" def lowercase__ ( self : Optional[int] , __magic_name__ : str ) -> int: """simple docstring""" if self.audio_column not in features: raise ValueError(f'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] , __magic_name__ ): raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' ) __snake_case : List[Any] = copy.deepcopy(self ) __snake_case : Any = self.input_schema.copy() __snake_case : Union[str, Any] = features[self.audio_column] __snake_case : int = input_schema return task_template @property def lowercase__ ( self : int ) -> Dict[str, str]: """simple docstring""" return {self.audio_column: "audio", self.transcription_column: "transcription"}
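# Hedged usage sketch (added, not part of the original file): the two schemas
# the task template pins down, built in user code with the public `datasets`
# API (the file above uses the library-internal imports instead).
from datasets import Audio, Features, Value

input_schema = Features({"audio": Audio()})                   # raw audio in
label_schema = Features({"transcription": Value("string")})   # text out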
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __UpperCamelCase = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$") @total_ordering @dataclass class _A : lowercase__: str lowercase__: Optional[str] = None lowercase__: Optional[Union[str, int]] = None lowercase__: Optional[Union[str, int]] = None lowercase__: Optional[Union[str, int]] = None def lowercase__ ( self : str ) -> List[str]: """simple docstring""" __snake_case , __snake_case , __snake_case : List[Any] = _str_to_version_tuple(self.version_str ) def __repr__( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def lowercase__ ( self : Tuple ) -> Dict: """simple docstring""" return self.major, self.minor, self.patch def lowercase__ ( self : Any , __magic_name__ : Dict ) -> Optional[int]: """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): return Version(__magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): return other raise TypeError(f'''{other} (type {type(__magic_name__ )}) cannot be compared to version.''' ) def __eq__( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> List[Any]: """simple docstring""" try: __snake_case : Union[str, Any] = self._validate_operand(__magic_name__ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : Union[str, Any] = self._validate_operand(__magic_name__ ) return self.tuple < other.tuple def __hash__( self : Any ) -> Any: """simple docstring""" return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowercase__ ( cls : List[str] , __magic_name__ : Tuple ) -> str: """simple docstring""" __snake_case : List[str] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowercase__ ( self : str ) -> str: """simple docstring""" return self.version_str def _a ( _lowerCamelCase ) -> List[str]: """simple docstring""" __snake_case : List[Any] = _VERSION_REG.match(_lowerCamelCase ) if not res: raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(_lowerCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" return ".".join(str(_lowerCamelCase ) for v in version_tuple )
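# Hedged illustration (added): what the version regex at the top of this file
# accepts, restated standalone with a couple of checked values.
import re

_VERSION_REG_SKETCH = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def parse_version_sketch(version_str: str) -> tuple:
    res = _VERSION_REG_SKETCH.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z.")
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))

assert parse_version_sketch("1.0.2") == (1, 0, 2)
assert parse_version_sketch("2.10.0") == (2, 10, 0)  # numeric, not lexicographic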
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class _A : lowercase__: List[str] lowercase__: Optional[str] = None # Automatically constructed lowercase__: ClassVar[str] = "dict" lowercase__: ClassVar[Any] = None lowercase__: str = field(default='''Translation''' , init=__lowercase , repr=__lowercase ) def __call__( self : int ) -> Optional[Any]: """simple docstring""" return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowercase__ ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class _A : lowercase__: Optional[List] = None lowercase__: Optional[int] = None lowercase__: Optional[str] = None # Automatically constructed lowercase__: ClassVar[str] = "dict" lowercase__: ClassVar[Any] = None lowercase__: str = field(default='''TranslationVariableLanguages''' , init=__lowercase , repr=__lowercase ) def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" __snake_case : int = sorted(set(self.languages ) ) if self.languages else None __snake_case : Optional[Any] = len(self.languages ) if self.languages else None def __call__( self : int ) -> Dict: """simple docstring""" return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowercase__ ( self : int , __magic_name__ : int ) -> Dict: """simple docstring""" __snake_case : Tuple = set(self.languages ) if self.languages and set(__magic_name__ ) - lang_set: raise ValueError( f'''Some languages in example ({", ".join(sorted(set(__magic_name__ ) - lang_set ) )}) are not in valid set ({", ".join(__magic_name__ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. __snake_case : int = [] for lang, text in translation_dict.items(): if isinstance(__magic_name__ , __magic_name__ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. __snake_case , __snake_case : Any = zip(*sorted(__magic_name__ ) ) return {"language": languages, "translation": translations} def lowercase__ ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
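# Hedged worked example (added): how the encode step above flattens a
# translation dict, splitting multi-text languages and sorting by language code.
example = {"fr": "bonjour", "en": ["hello", "hi"]}
pairs = []
for lang, text in example.items():
    if isinstance(text, str):
        pairs.append((lang, text))
    else:
        pairs.extend((lang, el) for el in text)
languages, translations = zip(*sorted(pairs))
assert languages == ("en", "en", "fr")
assert translations == ("hello", "hi", "bonjour")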
'''simple docstring'''


def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")

    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
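# Hedged worked examples (added): left-pad to a multiple of 3 bits, then map
# each 3-bit group to one octal digit.
assert bin_to_octal("110") == "6"
assert bin_to_octal("1111") == "17"     # padded to "001111"
assert bin_to_octal("101010") == "52"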
'''simple docstring''' from math import sqrt def _a ( _lowerCamelCase ) -> bool: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( number >= 0 ), "'number' must been an int and positive" __snake_case : Optional[Any] = True # 0 and 1 are none primes. if number <= 1: __snake_case : Dict = False for divisor in range(2 , int(round(sqrt(_lowerCamelCase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __snake_case : Any = False break # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'status' must been from type bool" return status def _a ( _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __snake_case : int = list(range(2 , n + 1 ) ) __snake_case : int = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_lowerCamelCase ) ): for j in range(i + 1 , len(_lowerCamelCase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __snake_case : str = 0 # filters actual prime numbers. __snake_case : Tuple = [x for x in begin_list if x != 0] # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'ans' must been from type list" return ans def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (n > 2), "'N' must been an int and > 2" __snake_case : Dict = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_lowerCamelCase ): ans.append(_lowerCamelCase ) # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'ans' must been from type list" return ans def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and number >= 0, "'number' must been an int and >= 0" __snake_case : int = [] # this list will be returns of the function. # potential prime number factors. 
__snake_case : List[str] = 2 __snake_case : List[Any] = number if number == 0 or number == 1: ans.append(_lowerCamelCase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_lowerCamelCase ): while quotient != 1: if is_prime(_lowerCamelCase ) and (quotient % factor == 0): ans.append(_lowerCamelCase ) quotient /= factor else: factor += 1 else: ans.append(_lowerCamelCase ) # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'ans' must been from type list" return ans def _a ( _lowerCamelCase ) -> Any: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" __snake_case : List[str] = 0 # prime factorization of 'number' __snake_case : Any = prime_factorization(_lowerCamelCase ) __snake_case : Any = max(_lowerCamelCase ) # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'ans' must been from type int" return ans def _a ( _lowerCamelCase ) -> List[Any]: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" __snake_case : List[Any] = 0 # prime factorization of 'number' __snake_case : int = prime_factorization(_lowerCamelCase ) __snake_case : List[str] = min(_lowerCamelCase ) # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'ans' must been from type int" return ans def _a ( _lowerCamelCase ) -> Dict: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _lowerCamelCase ), "compare bust been from type bool" return number % 2 == 0 def _a ( _lowerCamelCase ) -> Dict: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _lowerCamelCase ), "compare bust been from type bool" return number % 2 != 0 def _a ( _lowerCamelCase ) -> Optional[int]: """simple docstring""" assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and (number > 2) and is_even(_lowerCamelCase ) ), "'number' must been an int, even and > 2" __snake_case : List[Any] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __snake_case : Tuple = get_prime_numbers(_lowerCamelCase ) __snake_case : Dict = len(_lowerCamelCase ) # run variable for while-loops. __snake_case : Union[str, Any] = 0 __snake_case : Dict = None # exit variable. for break up the loops __snake_case : int = True while i < len_pn and loop: __snake_case : Optional[Any] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __snake_case : Any = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and (len(_lowerCamelCase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: """simple docstring""" assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
__snake_case : Optional[Any] = 0 while numbera != 0: __snake_case : Union[str, Any] = numbera % numbera __snake_case : Any = numbera __snake_case : Dict = rest # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: """simple docstring""" assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __snake_case : int = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __snake_case : int = prime_factorization(_lowerCamelCase ) __snake_case : Optional[int] = prime_factorization(_lowerCamelCase ) elif numbera == 1 or numbera == 1: __snake_case : List[str] = [] __snake_case : List[str] = [] __snake_case : Dict = max(_lowerCamelCase , _lowerCamelCase ) __snake_case : Optional[int] = 0 __snake_case : Union[str, Any] = 0 __snake_case : Dict = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __snake_case : Optional[Any] = prime_fac_a.count(_lowerCamelCase ) __snake_case : int = prime_fac_a.count(_lowerCamelCase ) for _ in range(max(_lowerCamelCase , _lowerCamelCase ) ): ans *= n else: __snake_case : Union[str, Any] = prime_fac_a.count(_lowerCamelCase ) for _ in range(_lowerCamelCase ): ans *= n done.append(_lowerCamelCase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __snake_case : List[str] = prime_fac_a.count(_lowerCamelCase ) for _ in range(_lowerCamelCase ): ans *= n done.append(_lowerCamelCase ) # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _a ( _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (n >= 0), "'number' must been a positive int" __snake_case : Dict = 0 __snake_case : Optional[Any] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_lowerCamelCase ): ans += 1 # precondition assert isinstance(_lowerCamelCase , _lowerCamelCase ) and is_prime( _lowerCamelCase ), "'ans' must been a prime number and from type int" return ans def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" assert ( is_prime(_lowerCamelCase ) and is_prime(_lowerCamelCase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __snake_case : List[str] = p_number_a + 1 # jump to the next number __snake_case : Optional[int] = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_lowerCamelCase ): number += 1 while number < p_number_a: ans.append(_lowerCamelCase ) number += 1 # fetch the next prime number. while not is_prime(_lowerCamelCase ): number += 1 # precondition assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and ans[0] != p_number_a and ans[len(_lowerCamelCase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
return ans def _a ( _lowerCamelCase ) -> str: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (n >= 1), "'n' must been int and >= 1" __snake_case : Any = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_lowerCamelCase ) # precondition assert ans[0] == 1 and ans[len(_lowerCamelCase ) - 1] == n, "Error in function getDivisiors(...)" return ans def _a ( _lowerCamelCase ) -> Any: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and ( number > 1 ), "'number' must been an int and >= 1" __snake_case : Optional[int] = get_divisors(_lowerCamelCase ) # precondition assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and (divisors[0] == 1) and (divisors[len(_lowerCamelCase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _a ( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: """simple docstring""" assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __snake_case : Tuple = gcd(abs(_lowerCamelCase ) , abs(_lowerCamelCase ) ) # precondition assert ( isinstance(_lowerCamelCase , _lowerCamelCase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _a ( _lowerCamelCase ) -> Tuple: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (n >= 0), "'n' must been a int and >= 0" __snake_case : Tuple = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _a ( _lowerCamelCase ) -> Optional[Any]: """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (n >= 0), "'n' must been an int and >= 0" __snake_case : Any = 0 __snake_case : Tuple = 1 __snake_case : str = 1 # this will be return for _ in range(n - 1 ): __snake_case : Any = ans ans += fiba __snake_case : Any = tmp return ans
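# Hedged check (added, not part of the original library): the trial-division
# primality test at the top of this file, restated standalone (the names here
# are mine, since the dump above collapsed the original function names), with
# a few expected values.
from math import sqrt

def is_prime_sketch(number: int) -> bool:
    if number <= 1:
        return False
    # test divisors up to and including round(sqrt(number))
    for divisor in range(2, int(round(sqrt(number))) + 1):
        if number % divisor == 0:
            return False
    return True

assert [n for n in range(2, 20) if is_prime_sketch(n)] == [2, 3, 5, 7, 11, 13, 17, 19]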
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer __UpperCamelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast __UpperCamelCase = TaTokenizerFast __UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys __UpperCamelCase = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ) -> Any: """simple docstring""" __snake_case : Tuple = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __snake_case : Optional[Any] = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(__magic_name__ ) , torch_builtin(__magic_name__ ) ) ) self.assertFalse(torch.allclose(gelu_python(__magic_name__ ) , gelu_new(__magic_name__ ) ) ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Any = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __snake_case : Optional[Any] = get_activation("""gelu""" ) __snake_case : List[Any] = get_activation("""gelu_10""" ) __snake_case : List[str] = torch_builtin(__magic_name__ ) __snake_case : List[str] = geluaa(__magic_name__ ) __snake_case : Optional[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(__magic_name__ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowercase__ ( self : Dict ) -> Dict: """simple docstring""" get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(__magic_name__ ): get_activation("""bogus""" ) with self.assertRaises(__magic_name__ ): get_activation(__magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case : Dict = get_activation("""gelu""" ) __snake_case : Optional[Any] = 1 __snake_case : str = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__magic_name__ ): __snake_case : Tuple = acta.a
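# Hedged reference (added, not part of the original tests): the exact GELU
# that the tests above exercise is x * 0.5 * (1 + erf(x / sqrt(2))); a scalar
# restatement in plain Python with a checked value.
import math

def gelu_scalar(x: float) -> float:
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

assert abs(gelu_scalar(0.0)) < 1e-12
assert abs(gelu_scalar(1.0) - 0.8413447) < 1e-6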
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): @slow def lowercase__ ( self : List[str] ) -> int: """simple docstring""" __snake_case : List[Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __snake_case : Tuple = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __snake_case : List[str] = model(__magic_name__ )["""last_hidden_state"""] __snake_case : Any = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. __snake_case : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
'''simple docstring'''

import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
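# Hedged usage (added): negative inputs are squashed toward -alpha, positive
# inputs pass through unchanged.
if __name__ == "__main__":
    print(exponential_linear_unit(np.array([-2.0, 0.0, 3.0]), alpha=1.0))
    # -> [-0.86466472  0.          3.        ]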
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Tuple = batch_size __snake_case : List[Any] = num_channels __snake_case : Dict = image_size __snake_case : Tuple = patch_size __snake_case : str = is_training __snake_case : Optional[Any] = use_input_mask __snake_case : int = use_token_type_ids __snake_case : str = use_labels __snake_case : Dict = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Tuple = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : List[Any] = shape_size __snake_case : Tuple = num_labels __snake_case : List[Any] = num_choices __snake_case : Optional[Any] = scope __snake_case : List[str] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : List[str] = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __snake_case : List[str] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __snake_case : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : Union[str, Any] = bbox[i, j, 3] __snake_case : Union[str, Any] = bbox[i, j, 1] __snake_case : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Optional[Any] = bbox[i, j, 2] __snake_case : Tuple = bbox[i, j, 0] __snake_case : Optional[Any] = tmp_coordinate __snake_case : Dict = tf.constant(__magic_name__ ) __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Any = None if self.use_input_mask: __snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ ) # text + image __snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) __snake_case : List[str] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , ) __snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , 
__magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any: """simple docstring""" __snake_case : Any = self.num_labels __snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" __snake_case : str = self.num_labels __snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ ) __snake_case : Tuple = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" __snake_case : Optional[int] = 2 __snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ ) __snake_case : List[Any] = model( __magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Dict = config_and_inputs __snake_case : List[Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase__: Union[str, Any] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase__: Dict = False lowercase__: int = False lowercase__: Dict = False def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" return True def lowercase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int=False ) -> dict: """simple docstring""" __snake_case : Any = copy.deepcopy(__magic_name__ ) 
if model_class in get_values(__magic_name__ ): __snake_case : Union[str, Any] = { k: tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__magic_name__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__magic_name__ ): __snake_case : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : str = TFLayoutLMvaModelTester(self ) __snake_case : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : List[str] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ): # The number of elements in the loss should be the same as the number of elements in the label __snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Any = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0] ] __snake_case : List[str] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = prepared_for_class.pop("""input_ids""" ) __snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : str = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: __snake_case : str = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __snake_case : Dict = -1_00 __snake_case : str = tf.convert_to_tensor(__magic_name__ ) __snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ ) __snake_case : Tuple = model(__magic_name__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __snake_case : str = self._prepare_for_class(inputs_dict.copy() 
, __magic_name__ , return_labels=__magic_name__ ) # Get keys that were added with the _prepare_for_class function __snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys() __snake_case : Optional[Any] = inspect.signature(model.call ).parameters __snake_case : int = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __snake_case : Union[str, Any] = {0: """input_ids"""} for label_key in label_keys: __snake_case : int = signature_names.index(__magic_name__ ) __snake_case : Optional[int] = label_key __snake_case : Optional[int] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __snake_case : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __snake_case : List[str] = prepared_for_class[value] __snake_case : str = tuple(__magic_name__ ) # Send to model __snake_case : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase__ ( self : List[str] ) -> List[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Tuple = type self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) @slow def lowercase__ ( self : str ) -> 
Optional[int]: """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ) -> Optional[Any]: """simple docstring""" __snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None @slow def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) __snake_case : str = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values __snake_case : Tuple = tf.constant([[1, 2]] ) __snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ ) # verify the logits __snake_case : List[str] = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ ) __snake_case : Tuple = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
13
1
'''simple docstring''' __UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" def _a ( ) -> None: """simple docstring""" __snake_case : Dict = input("""Enter message: """ ) __snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ ) __snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): __snake_case : Any = """encrypt""" __snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase ) elif mode.lower().startswith("""d""" ): __snake_case : Optional[int] = """decrypt""" __snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase ) print(F'''\n{mode.title()}ed message:''' ) print(_lowerCamelCase ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" return translate_message(_lowerCamelCase , _lowerCamelCase , """encrypt""" ) def _a ( _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" return translate_message(_lowerCamelCase , _lowerCamelCase , """decrypt""" ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: """simple docstring""" __snake_case : str = [] __snake_case : Dict = 0 __snake_case : Optional[int] = key.upper() for symbol in message: __snake_case : Any = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(_lowerCamelCase ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(_lowerCamelCase ): __snake_case : Tuple = 0 else: translated.append(_lowerCamelCase ) return "".join(_lowerCamelCase ) if __name__ == "__main__": main()
13
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class _A : def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]: """simple docstring""" __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = image_size __snake_case : List[Any] = num_channels __snake_case : List[str] = patch_size __snake_case : List[str] = num_frames __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : str = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Tuple = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = attention_type __snake_case : Optional[Any] = initializer_range __snake_case : Optional[Any] = scope __snake_case : Optional[int] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case : str = (image_size // patch_size) ** 2 __snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case : str = self.num_labels return config def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int: """simple docstring""" __snake_case : Optional[int] = TimesformerModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str: """simple docstring""" __snake_case : Any = TimesformerForVideoClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Optional[int] = model(__magic_name__ ) # verify the logits shape __snake_case : Dict = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __magic_name__ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs __snake_case : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__: List[Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__: List[str] = False lowercase__: List[Any] = False lowercase__: Dict = False lowercase__: int = False def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case : List[str] = TimesformerModelTester(self ) __snake_case : List[Any] = ConfigTester( self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int: """simple docstring""" __snake_case : Dict = copy.deepcopy(__magic_name__ ) if return_labels: if model_class in get_values(__magic_name__ ): __snake_case : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ ) return inputs_dict def lowercase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def lowercase__ ( self : List[str] ) -> Any: """simple docstring""" pass def lowercase__ ( self : str ) -> Optional[int]: """simple docstring""" __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : str = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowercase__ ( self : Any ) -> int: """simple docstring""" __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(__magic_name__ ) __snake_case : Optional[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Union[str, Any] = [*signature.parameters.keys()] __snake_case : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowercase__ ( self : str ) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__magic_name__ ) @slow def lowercase__ ( self : List[Any] ) -> Dict: """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = TimesformerModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowercase__ ( self : Dict ) -> Optional[int]: """simple docstring""" if not self.has_attentions: pass else: __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : List[str] = self.model_tester.seq_length __snake_case : Tuple = self.model_tester.num_frames __snake_case : str = True __snake_case : List[str] = False __snake_case : Tuple = True __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : Dict = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Optional[int] = True __snake_case : Any = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case : int = len(__magic_name__ ) # Check attention is always last and order is fine __snake_case : Optional[int] = True __snake_case : Optional[int] = True __snake_case : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): __snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) self.assertEqual(out_len + 1 , len(__magic_name__ ) ) __snake_case : List[Any] = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def lowercase__ ( self : Dict ) -> int: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ): __snake_case : str = model_class(__magic_name__ ) model.to(__magic_name__ ) 
model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) __snake_case : int = outputs.hidden_states __snake_case : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__magic_name__ ) , __magic_name__ ) __snake_case : int = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) __snake_case : List[Any] = np.load(_lowerCamelCase ) return list(_lowerCamelCase ) @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( __magic_name__ ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : Dict = prepare_video() __snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): __snake_case : Any = model(**__magic_name__ ) # verify the logits __snake_case : int = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) __snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
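# Worked arithmetic for the token-count comment in the tester above, using its
# own defaults (image_size=10, patch_size=2, num_frames=2):
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 5 * 5 = 25
seq_length = num_frames * num_patches_per_frame + 1      # 2 * 25 + 1 = 51 (incl. CLS)
assert (num_patches_per_frame, seq_length) == (25, 51)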
13
1
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart __UpperCamelCase = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } __UpperCamelCase = { "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } @lru_cache() def _a ( ) -> str: """simple docstring""" __snake_case : Optional[int] = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) __snake_case : Tuple = bs[:] __snake_case : List[Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_lowerCamelCase ) cs.append(2**8 + n ) n += 1 __snake_case : Tuple = [chr(_lowerCamelCase ) for n in cs] return dict(zip(_lowerCamelCase , _lowerCamelCase ) ) def _a ( _lowerCamelCase ) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = set() __snake_case : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __snake_case : Tuple = char return pairs class _A ( __lowercase ): lowercase__: Optional[Any] = VOCAB_FILES_NAMES lowercase__: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]="replace" , __magic_name__ : str="<s>" , __magic_name__ : Optional[int]="</s>" , __magic_name__ : str="</s>" , __magic_name__ : List[str]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : int="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : List[str]=False , **__magic_name__ : List[str] , ) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token __snake_case : Union[str, Any] = 
AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token __snake_case : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token __snake_case : Tuple = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token __snake_case : Tuple = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token __snake_case : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token super().__init__( errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , **__magic_name__ , ) with open(__magic_name__ , encoding="""utf-8""" ) as vocab_handle: __snake_case : int = json.load(__magic_name__ ) __snake_case : Any = {v: k for k, v in self.encoder.items()} __snake_case : Tuple = errors # how to handle errors in decoding __snake_case : Optional[int] = bytes_to_unicode() __snake_case : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(__magic_name__ , encoding="""utf-8""" ) as merges_handle: __snake_case : str = merges_handle.read().split("""\n""" )[1:-1] __snake_case : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges] __snake_case : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) __snake_case : Optional[int] = {} __snake_case : str = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __snake_case : List[str] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def lowercase__ ( self : Any ) -> List[Any]: """simple docstring""" return len(self.encoder ) def lowercase__ ( self : List[str] ) -> str: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : Dict , __magic_name__ : Optional[Any] ) -> Optional[int]: """simple docstring""" if token in self.cache: return self.cache[token] __snake_case : List[str] = tuple(__magic_name__ ) __snake_case : str = get_pairs(__magic_name__ ) if not pairs: return token while True: __snake_case : List[str] = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __snake_case , __snake_case : Optional[Any] = bigram __snake_case : Union[str, Any] = [] __snake_case : Optional[int] = 0 while i < len(__magic_name__ ): try: __snake_case : Union[str, Any] = word.index(__magic_name__ , __magic_name__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __snake_case : str = j if word[i] == first and i < len(__magic_name__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __snake_case : Union[str, Any] = tuple(__magic_name__ ) __snake_case : Optional[int] = 
new_word if len(__magic_name__ ) == 1: break else: __snake_case : Dict = get_pairs(__magic_name__ ) __snake_case : Tuple = """ """.join(__magic_name__ ) __snake_case : Any = word return word def lowercase__ ( self : List[str] , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" __snake_case : Dict = [] for token in re.findall(self.pat , __magic_name__ ): __snake_case : Union[str, Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__magic_name__ ).split(""" """ ) ) return bpe_tokens def lowercase__ ( self : List[Any] , __magic_name__ : Optional[Any] ) -> str: """simple docstring""" return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[Any] ) -> Tuple: """simple docstring""" return self.decoder.get(__magic_name__ ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[Any] ) -> int: """simple docstring""" __snake_case : Optional[int] = """""".join(__magic_name__ ) __snake_case : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowercase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case : Union[str, Any] = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __snake_case : Any = os.path.join( __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + """\n""" ) __snake_case : List[Any] = 0 with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) __snake_case : List[Any] = token_index writer.write(""" """.join(__magic_name__ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] __snake_case : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) if token_ids_a is None: return [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] def lowercase__ ( self : Tuple , __magic_name__ : List[int] , 
__magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __snake_case : Union[str, Any] = [self.sep_token_id] __snake_case : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : str=False , **__magic_name__ : Optional[Any] ) -> str: """simple docstring""" __snake_case : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__magic_name__ ) > 0 and not text[0].isspace()): __snake_case : str = """ """ + text return (text, kwargs)
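# A minimal, self-contained sketch of the greedy BPE merge loop implemented in
# the tokenizer above. The toy merge table and the simplified left-to-right scan
# are mine (the original uses word.index with try/except, to the same effect);
# real ranks come from merges.txt via bpe_ranks.
def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe(token, bpe_ranks):
    word = tuple(token)
    pairs = get_pairs(word)
    while pairs:
        # always merge the lowest-ranked (earliest-learned) adjacent pair first
        first, second = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if (first, second) not in bpe_ranks:
            break
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = get_pairs(word)
    return " ".join(word)

assert bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == "low e r"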
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ["ConditionalDetrFeatureExtractor"] __UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST", "WavLMForAudioFrameClassification", "WavLMForCTC", "WavLMForSequenceClassification", "WavLMForXVector", "WavLMModel", "WavLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
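# A tiny runnable sketch of the lazy-import pattern these __init__ files rely
# on: attribute access triggers the real import on first use. This is a
# simplified stand-in for transformers' _LazyModule, using an absolute import
# and toy names rather than the library's actual implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that actually defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = LazyModule("demo", {"json": ["loads"]})
assert lazy.loads('{"a": 1}') == {"a": 1}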
13
'''simple docstring''' def _a ( _lowerCamelCase ) -> Dict: """simple docstring""" __snake_case : str = 0 __snake_case : Optional[int] = len(_lowerCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , _lowerCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _a ( _lowerCamelCase ) -> Tuple: """simple docstring""" if len(_lowerCamelCase ) <= 1: return arr, 0 __snake_case : Any = len(_lowerCamelCase ) // 2 __snake_case : List[str] = arr[0:mid] __snake_case : int = arr[mid:] __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : Tuple = count_inversions_recursive(_lowerCamelCase ) __snake_case , __snake_case : str = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase ) __snake_case : str = inversion_p + inversions_q + cross_inversions return c, num_inversions def _a ( _lowerCamelCase , _lowerCamelCase ) -> int: """simple docstring""" __snake_case : Any = [] __snake_case : List[str] = 0 while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ): if p[i] > q[j]: # if P[i] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(_lowerCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(_lowerCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _a ( ) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __snake_case : Optional[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , _lowerCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __snake_case : Any = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) # an empty list should also have zero inversions __snake_case : List[Any] = [] __snake_case : List[Any] = count_inversions_bf(_lowerCamelCase ) __snake_case , __snake_case : List[Any] = count_inversions_recursive(_lowerCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , _lowerCamelCase ) if __name__ == "__main__": main()
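# A runnable plain-name version of the divide-and-conquer counter above, for
# reference (same recurrence: inversions(P) + inversions(Q) + cross inversions
# counted during the merge):
def count_inversions(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    left, inv_left = count_inversions(arr[:mid])
    right, inv_right = count_inversions(arr[mid:])
    merged, i, j, cross = [], 0, 0, 0
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            cross += len(left) - i  # left[i:] is sorted, so every element beats right[j]
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    merged.extend(left[i:] or right[j:])
    return merged, inv_left + inv_right + cross

assert count_inversions([10, 2, 1, 5, 5, 2, 11])[1] == 8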
13
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class _A ( unittest.TestCase ): lowercase__: str = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowercase__: str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def lowercase__ ( self : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Any ) -> List[Any]: """simple docstring""" __snake_case : int = TextaTextGenerationPipeline(model=__magic_name__ , tokenizer=__magic_name__ ) return generator, ["Something to write", "Something else"] def lowercase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = generator("""Something there""" ) self.assertEqual(__magic_name__ , [{"""generated_text""": ANY(__magic_name__ )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) __snake_case : Optional[int] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__magic_name__ ) self.assertEqual( __magic_name__ , [ [{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}], [{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}], ] , ) __snake_case : List[Any] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__magic_name__ ) self.assertEqual( __magic_name__ , [ [{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}], [{"""generated_text""": ANY(__magic_name__ )}, {"""generated_text""": ANY(__magic_name__ )}], ] , ) with self.assertRaises(__magic_name__ ): generator(4 ) @require_torch def lowercase__ ( self : Optional[int] ) -> Dict: """simple docstring""" __snake_case : Dict = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility __snake_case : int = generator("""Something there""" , do_sample=__magic_name__ ) self.assertEqual(__magic_name__ , [{"""generated_text""": """"""}] ) __snake_case : str = 3 __snake_case : int = generator( """Something there""" , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , ) __snake_case : Any = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(__magic_name__ , __magic_name__ ) __snake_case : List[str] = generator("""This is a test""" , do_sample=__magic_name__ , num_return_sequences=2 , return_tensors=__magic_name__ ) self.assertEqual( __magic_name__ , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) __snake_case : Any = generator.model.config.eos_token_id __snake_case : Any = """<pad>""" __snake_case : Optional[int] = generator( ["""This is a test""", """This is a second test"""] , do_sample=__magic_name__ , num_return_sequences=2 , batch_size=2 , return_tensors=__magic_name__ , ) self.assertEqual( __magic_name__ , [ [ 
{"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def lowercase__ ( self : int ) -> int: """simple docstring""" __snake_case : int = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility __snake_case : List[Any] = generator("""Something there""" , do_sample=__magic_name__ ) self.assertEqual(__magic_name__ , [{"""generated_text""": """"""}] )
13
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
13
1
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer __UpperCamelCase = logging.get_logger(__name__) class _A ( __lowercase ): lowercase__: int = '''AutoTokenizer''' lowercase__: Union[str, Any] = ['''tokenizer'''] lowercase__: Union[str, Any] = { '''semantic_prompt''': 1, '''coarse_prompt''': 2, '''fine_prompt''': 2, } def __init__( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : List[Any]=None ) -> str: """simple docstring""" super().__init__(__magic_name__ ) __snake_case : Tuple = speaker_embeddings @classmethod def lowercase__ ( cls : Dict , __magic_name__ : Any , __magic_name__ : int="speaker_embeddings_path.json" , **__magic_name__ : List[Any] ) -> List[str]: """simple docstring""" if speaker_embeddings_dict_path is not None: __snake_case : Optional[int] = get_file_from_repo( __magic_name__ , __magic_name__ , subfolder=kwargs.pop("""subfolder""" , __magic_name__ ) , cache_dir=kwargs.pop("""cache_dir""" , __magic_name__ ) , force_download=kwargs.pop("""force_download""" , __magic_name__ ) , proxies=kwargs.pop("""proxies""" , __magic_name__ ) , resume_download=kwargs.pop("""resume_download""" , __magic_name__ ) , local_files_only=kwargs.pop("""local_files_only""" , __magic_name__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , __magic_name__ ) , revision=kwargs.pop("""revision""" , __magic_name__ ) , ) if speaker_embeddings_path is None: logger.warning( f'''`{os.path.join(__magic_name__ , __magic_name__ )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) __snake_case : Union[str, Any] = None else: with open(__magic_name__ ) as speaker_embeddings_json: __snake_case : int = json.load(__magic_name__ ) else: __snake_case : Optional[int] = None __snake_case : Tuple = AutoTokenizer.from_pretrained(__magic_name__ , **__magic_name__ ) return cls(tokenizer=__magic_name__ , speaker_embeddings=__magic_name__ ) def lowercase__ ( self : Any , __magic_name__ : List[str] , __magic_name__ : List[Any]="speaker_embeddings_path.json" , __magic_name__ : str="speaker_embeddings" , __magic_name__ : bool = False , **__magic_name__ : Dict , ) -> Optional[int]: """simple docstring""" if self.speaker_embeddings is not None: os.makedirs(os.path.join(__magic_name__ , __magic_name__ , """v2""" ) , exist_ok=__magic_name__ ) __snake_case : Tuple = {} __snake_case : List[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": __snake_case : Any = self._load_voice_preset(__magic_name__ ) __snake_case : str = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , __magic_name__ , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=__magic_name__ , ) __snake_case : str = os.path.join(__magic_name__ , f'''{prompt_key}_{key}.npy''' ) __snake_case : Optional[Any] = tmp_dict with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as fp: json.dump(__magic_name__ , __magic_name__ ) super().save_pretrained(__magic_name__ , __magic_name__ , **__magic_name__ ) def lowercase__ ( self : Union[str, Any] , __magic_name__ : str = None , **__magic_name__ : str ) -> Optional[Any]: """simple docstring""" 
__snake_case : Tuple = self.speaker_embeddings[voice_preset] __snake_case : List[str] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) __snake_case : Tuple = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , __magic_name__ ) , cache_dir=kwargs.pop("""cache_dir""" , __magic_name__ ) , force_download=kwargs.pop("""force_download""" , __magic_name__ ) , proxies=kwargs.pop("""proxies""" , __magic_name__ ) , resume_download=kwargs.pop("""resume_download""" , __magic_name__ ) , local_files_only=kwargs.pop("""local_files_only""" , __magic_name__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , __magic_name__ ) , revision=kwargs.pop("""revision""" , __magic_name__ ) , ) if path is None: raise ValueError( f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) __snake_case : List[Any] = np.load(__magic_name__ ) return voice_preset_dict def lowercase__ ( self : List[str] , __magic_name__ : Optional[dict] = None ) -> Dict: """simple docstring""" for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self : Optional[int] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : Union[str, Any]="pt" , __magic_name__ : Optional[int]=2_56 , __magic_name__ : Optional[Any]=False , __magic_name__ : Optional[int]=True , __magic_name__ : str=False , **__magic_name__ : List[str] , ) -> str: """simple docstring""" if voice_preset is not None and not isinstance(__magic_name__ , __magic_name__ ): if ( isinstance(__magic_name__ , __magic_name__ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): __snake_case : Dict = self._load_voice_preset(__magic_name__ ) else: if isinstance(__magic_name__ , __magic_name__ ) and not voice_preset.endswith(""".npz""" ): __snake_case : Any = voice_preset + """.npz""" __snake_case : Optional[Any] = np.load(__magic_name__ ) if voice_preset is not None: self._validate_voice_preset_dict(__magic_name__ , **__magic_name__ ) __snake_case : str = BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ ) __snake_case : Union[str, Any] = self.tokenizer( __magic_name__ , return_tensors=__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , return_attention_mask=__magic_name__ , return_token_type_ids=__magic_name__ , add_special_tokens=__magic_name__ , **__magic_name__ , ) if voice_preset is not None: __snake_case : Any = voice_preset return encoded_text
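# Toy round-trip for the .npz voice presets handled in __call__ above; the file
# name and zero arrays are illustrative, but the keys and their expected
# dimensionalities follow preset_shape (semantic: 1-D, coarse/fine: 2-D).
import os
import tempfile

import numpy as np

preset = {
    "semantic_prompt": np.zeros(8, dtype=np.int64),
    "coarse_prompt": np.zeros((2, 8), dtype=np.int64),
    "fine_prompt": np.zeros((8, 8), dtype=np.int64),
}
path = os.path.join(tempfile.mkdtemp(), "speaker.npz")
np.savez(path, **preset)
loaded = np.load(path)
assert loaded["semantic_prompt"].ndim == 1 and loaded["coarse_prompt"].ndim == 2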
13
'''simple docstring''' import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _A ( __lowercase , unittest.TestCase ): lowercase__: List[Any] = CanineTokenizer lowercase__: Optional[int] = False def lowercase__ ( self : Any ) -> Any: """simple docstring""" super().setUp() __snake_case : Dict = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return CanineTokenizer.from_pretrained("""google/canine-s""" ) def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer: """simple docstring""" __snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) __snake_case : Optional[Any] = 10_24 return tokenizer @require_torch def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : Optional[Any] = self.canine_tokenizer __snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off __snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on __snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) self.assertIsInstance(__magic_name__ , __magic_name__ ) __snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def lowercase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __snake_case : Any = self.canine_tokenizer __snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] __snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , __magic_name__ ) self.assertIn("""attention_mask""" , __magic_name__ ) self.assertIn("""token_type_ids""" , __magic_name__ ) @require_torch def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : Dict = self.canine_tokenizer __snake_case : Optional[Any] = [ """What's the weater?""", """It's about 25 degrees.""", ] __snake_case : Any = tokenizer( text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def lowercase__ ( self : Tuple ) -> int: """simple docstring""" __snake_case : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Dict = tempfile.mkdtemp() __snake_case : str = """ He is very happy, 
UNwant\u00E9d,running"""
                __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                tokenizer.save_pretrained(__magic_name__ )
                __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ )
                __snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                self.assertListEqual(__magic_name__ , __magic_name__ )
                shutil.rmtree(__magic_name__ )

        __snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case : Optional[Any] = tempfile.mkdtemp()
                __snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
                __snake_case : Optional[int] = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                __snake_case : List[Any] = chr(0xE007 )
                additional_special_tokens.append(__magic_name__ )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                __snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                tokenizer.save_pretrained(__magic_name__ )
                __snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ )
                __snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                self.assertListEqual(__magic_name__ , __magic_name__ )
                self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                __snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(__magic_name__ )

    def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        __snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ )
                # a special token for Canine can be defined as follows:
                __snake_case : Tuple = 0xE005
                __snake_case : Tuple = chr(__magic_name__ )
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                self.assertEqual(len(__magic_name__ ) , 1 )
                __snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
                __snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                __snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                self.assertEqual(__magic_name__ , input_encoded + special_token_id )
                __snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
                self.assertTrue(special_token not in decoded )

    def lowercase__ ( self : List[str] ) -> Tuple:
        """simple docstring"""
        __snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __snake_case : Dict = chr(0xE005 )
                __snake_case : str = chr(0xE006 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`.
                # (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
                __snake_case : Tuple = tokenizer.tokenize(__magic_name__ )
                __snake_case : Any = tokenizer.tokenize(__magic_name__ )
                self.assertEqual(len(__magic_name__ ) , 1 )
                self.assertEqual(len(__magic_name__ ) , 1 )
                self.assertEqual(token_a[0] , __magic_name__ )
                self.assertEqual(token_a[0] , __magic_name__ )

    @require_tokenizers
    def lowercase__ ( self : Optional[int] ) -> Tuple:
        """simple docstring"""
        __snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # a special token for Canine can be defined as follows:
                __snake_case : Optional[Any] = 0xE006
                __snake_case : List[str] = chr(__magic_name__ )
                __snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ )
                tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(__magic_name__ )
                    tokenizer.from_pretrained(__magic_name__ )

    def lowercase__ ( self : Any ) -> int:
        """simple docstring"""
        __snake_case : Union[str, Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__magic_name__ )
                with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    __snake_case : Any = json.load(__magic_name__ )
                with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    __snake_case : Tuple = json.load(__magic_name__ )
                # a special token for Canine can be defined as follows:
                __snake_case : Tuple = 0xE006
                __snake_case : int = chr(__magic_name__ )
                __snake_case : List[Any] = [new_token_a]
                __snake_case : Union[str, Any] = [new_token_a]
                with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(__magic_name__ , __magic_name__ )
                with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(__magic_name__ , __magic_name__ )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
                self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] ,
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
                __snake_case : Any = 0xE007
                __snake_case : Any = chr(__magic_name__ )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
                __snake_case : Union[str, Any] = tokenizer_class.from_pretrained(
                    __magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
                self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )

    @require_tokenizers
    def lowercase__ ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        __snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __snake_case : List[str] = """hello world"""
                if self.space_between_special_tokens:
                    __snake_case : Union[str, Any] = """[CLS] hello world [SEP]"""
                else:
                    __snake_case : List[Any] = input
                __snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                __snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(__magic_name__ , [output, output.lower()] )

    def lowercase__ ( self : Tuple ) -> Tuple:
        """simple docstring"""
        __snake_case : Optional[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __snake_case : str = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                __snake_case : Dict = """a"""
                __snake_case : Tuple = ord(__magic_name__ )
                for attr in attributes_list:
                    setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
                    self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
                    self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
                    setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
                    self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
                    self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
                setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
                self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
                self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
                __snake_case : Dict = 0xE006
                __snake_case : str = chr(__magic_name__ )
                setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
                self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
                self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )

    def lowercase__ ( self : Dict ) -> int:
        """simple docstring"""
        pass

    def lowercase__ ( self : str ) -> Tuple:
        """simple docstring"""
        pass

    def lowercase__ ( self : Tuple ) -> List[str]:
        """simple docstring"""
        pass

    def lowercase__ ( self : Optional[int] ) -> Any:
        """simple docstring"""
        pass

    def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        pass

    def lowercase__ ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        pass

    def lowercase__ ( self : List[Any] ) -> Any:
        """simple docstring"""
        pass

    def lowercase__ ( self : Dict ) -> List[str]:
        """simple docstring"""
        pass
'''simple docstring'''

from typing import Any, Dict, List, Optional, Tuple, Union

import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class _A ( unittest.TestCase ):
    def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : int ) -> Optional[int]:
        """simple docstring"""
        __snake_case : Any = jnp.ones((batch_size, length) ) / length
        return scores

    def lowercase__ ( self : Any ) -> List[Any]:
        """simple docstring"""
        __snake_case : Any = None
        __snake_case : str = 20
        __snake_case : List[str] = self._get_uniform_logits(batch_size=2 , length=__magic_name__ )
        # tweak scores to not be uniform anymore
        __snake_case : Union[str, Any] = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        __snake_case : Dict = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        __snake_case : str = jax.nn.softmax(__magic_name__ , axis=-1 )
        __snake_case : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
        __snake_case : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
        __snake_case : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__magic_name__ , scores.copy() , cur_len=__magic_name__ ) , axis=-1 )
        __snake_case : str = jax.nn.softmax(temp_dist_warper_smoother(__magic_name__ , scores.copy() , cur_len=__magic_name__ ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )

    def lowercase__ ( self : int ) -> List[Any]:
        """simple docstring"""
        __snake_case : Any = None
        __snake_case : Optional[int] = 10
        __snake_case : int = 2
        # create ramp distribution
        __snake_case : str = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy()
        __snake_case : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
        __snake_case : Union[str, Any] = FlaxTopKLogitsWarper(3 )
        __snake_case : Optional[int] = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        __snake_case : Dict = 5
        __snake_case : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        __snake_case : str = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, length) ).copy()
        __snake_case : Tuple = top_k_warp_safety_check(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )

    def lowercase__ ( self : Optional[int] ) -> Any:
        """simple docstring"""
        __snake_case : Union[str, Any] = None
        __snake_case : Optional[int] = 10
        __snake_case : List[str] = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        __snake_case : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        __snake_case : Any = FlaxTopPLogitsWarper(0.8 )
        __snake_case : List[str] = np.exp(top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        __snake_case : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        __snake_case : Optional[int] = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        __snake_case : Union[str, Any] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        __snake_case : Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        __snake_case : Dict = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )

    def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        __snake_case : Dict = 20
        __snake_case : Dict = 4
        __snake_case : int = 0
        __snake_case : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
        # check that min length is applied at length 5
        __snake_case : int = ids_tensor((batch_size, 20) , vocab_size=20 )
        __snake_case : Optional[int] = 5
        __snake_case : Any = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Optional[Any] = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
        # check that min length is not applied anymore at length 15
        __snake_case : Optional[int] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Tuple = 15
        __snake_case : Dict = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        self.assertFalse(jnp.isinf(__magic_name__ ).any() )

    def lowercase__ ( self : Dict ) -> Union[str, Any]:
        """simple docstring"""
        __snake_case : Dict = 20
        __snake_case : str = 4
        __snake_case : List[str] = 0
        __snake_case : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
        # check that all scores are -inf except the bos_token_id score
        __snake_case : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
        __snake_case : Tuple = 1
        __snake_case : Union[str, Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Tuple = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        __snake_case : Optional[Any] = 3
        __snake_case : Any = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Optional[int] = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        self.assertFalse(jnp.isinf(__magic_name__ ).any() )

    def lowercase__ ( self : Dict ) -> str:
        """simple docstring"""
        __snake_case : Optional[int] = 20
        __snake_case : Dict = 4
        __snake_case : Optional[Any] = 0
        __snake_case : Optional[Any] = 5
        __snake_case : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        __snake_case : Any = ids_tensor((batch_size, 4) , vocab_size=20 )
        __snake_case : List[str] = 4
        __snake_case : Tuple = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Union[str, Any] = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        __snake_case : int = 3
        __snake_case : Optional[Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Any = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        self.assertFalse(jnp.isinf(__magic_name__ ).any() )

    def lowercase__ ( self : List[str] ) -> str:
        """simple docstring"""
        __snake_case : Dict = 4
        __snake_case : Union[str, Any] = 10
        __snake_case : List[Any] = 15
        __snake_case : List[Any] = 2
        __snake_case : Any = 1
        __snake_case : Optional[Any] = 15
        # dummy input_ids and scores
        __snake_case : int = ids_tensor((batch_size, sequence_length) , __magic_name__ )
        __snake_case : Tuple = input_ids.copy()
        __snake_case : int = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Optional[int] = scores.copy()
        # instantiate all dist processors
        __snake_case : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
        __snake_case : List[Any] = FlaxTopKLogitsWarper(3 )
        __snake_case : Optional[int] = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        __snake_case : str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
        __snake_case : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
        __snake_case : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
        __snake_case : Union[str, Any] = 10
        # no processor list
        __snake_case : int = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        __snake_case : Dict = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        __snake_case : List[Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        __snake_case : int = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        __snake_case : Optional[Any] = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        __snake_case : Any = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        # with processor list
        __snake_case : Optional[int] = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        __snake_case : Optional[int] = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
        # scores should be equal
        self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )

    def lowercase__ ( self : Dict ) -> Any:
        """simple docstring"""
        __snake_case : Any = 4
        __snake_case : Optional[int] = 10
        __snake_case : Dict = 15
        __snake_case : Tuple = 2
        __snake_case : Union[str, Any] = 1
        __snake_case : int = 15
        # dummy input_ids and scores
        __snake_case : Any = ids_tensor((batch_size, sequence_length) , __magic_name__ )
        __snake_case : Optional[Any] = input_ids.copy()
        __snake_case : Dict = self._get_uniform_logits(__magic_name__ , __magic_name__ )
        __snake_case : Any = scores.copy()
        # instantiate all dist processors
        __snake_case : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
        __snake_case : str = FlaxTopKLogitsWarper(3 )
        __snake_case : int = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        __snake_case : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
        __snake_case : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
        __snake_case : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
        __snake_case : str = 10

        # no processor list
        def run_no_processor_list(__magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
            __snake_case : Union[str, Any] = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
            __snake_case : List[Any] = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
            __snake_case : Union[str, Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
            __snake_case : str = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
            __snake_case : str = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
            __snake_case : Optional[Any] = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
            return scores

        # with processor list
        def run_processor_list(__magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : Tuple ):
            __snake_case : Any = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            __snake_case : str = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
            return scores

        __snake_case : Optional[int] = jax.jit(__magic_name__ )
        __snake_case : Optional[int] = jax.jit(__magic_name__ )
        __snake_case : Any = jitted_run_no_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
        __snake_case : Dict = jitted_run_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
        # scores should be equal
        self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
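
# Hedged standalone sketch (not part of the test file above): the warpers under
# test are plain callables over (input_ids, scores, cur_len), so they can be
# exercised outside a generation loop. A minimal top-k example, assuming Flax is installed:
#
#     import jax.numpy as jnp
#     from transformers.generation import FlaxTopKLogitsWarper
#
#     toy_scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])  # (batch=1, vocab=4)
#     top_k = FlaxTopKLogitsWarper(top_k=2)
#     warped = top_k(None, toy_scores, cur_len=1)     # all but the top 2 logits -> -inf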
'''simple docstring'''

from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
'''simple docstring'''

from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class _A ( ProcessorMixin ):
    lowercase__: Optional[Any] = ['''image_processor''']
    lowercase__: str = '''SamImageProcessor'''

    def __init__( self : List[str] , __magic_name__ : Any ) -> str:
        """simple docstring"""
        super().__init__(__magic_name__ )
        __snake_case : Any = self.image_processor
        __snake_case : List[str] = -10
        __snake_case : Union[str, Any] = self.image_processor.size["""longest_edge"""]

    def __call__( self : Union[str, Any] , __magic_name__ : str=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : int=None , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : Optional[int] , ) -> BatchEncoding:
        """simple docstring"""
        __snake_case : Union[str, Any] = self.image_processor(
            __magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
        # pop arguments that are not used in the forward but used nevertheless
        __snake_case : Optional[int] = encoding_image_processor["""original_sizes"""]
        if hasattr(__magic_name__ , """numpy""" ):  # Checks if Torch or TF tensor
            __snake_case : int = original_sizes.numpy()
        __snake_case , __snake_case , __snake_case : Union[str, Any] = self._check_and_preprocess_points(
            input_points=__magic_name__ , input_labels=__magic_name__ , input_boxes=__magic_name__ , )
        __snake_case : Optional[Any] = self._normalize_and_convert(
            __magic_name__ , __magic_name__ , input_points=__magic_name__ , input_labels=__magic_name__ , input_boxes=__magic_name__ , return_tensors=__magic_name__ , )
        return encoding_image_processor

    def lowercase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[int]=None , __magic_name__ : Any=None , __magic_name__ : Any="pt" , ) -> Optional[int]:
        """simple docstring"""
        if input_points is not None:
            if len(__magic_name__ ) != len(__magic_name__ ):
                __snake_case : Union[str, Any] = [
                    self._normalize_coordinates(self.target_size , __magic_name__ , original_sizes[0] ) for point in input_points
                ]
            else:
                __snake_case : Optional[int] = [
                    self._normalize_coordinates(self.target_size , __magic_name__ , __magic_name__ )
                    for point, original_size in zip(__magic_name__ , __magic_name__ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    __snake_case , __snake_case : int = self._pad_points_and_labels(__magic_name__ , __magic_name__ )
            __snake_case : Union[str, Any] = np.array(__magic_name__ )
            if input_labels is not None:
                __snake_case : str = np.array(__magic_name__ )
        if input_boxes is not None:
            if len(__magic_name__ ) != len(__magic_name__ ):
                __snake_case : List[Any] = [
                    self._normalize_coordinates(self.target_size , __magic_name__ , original_sizes[0] , is_bounding_box=__magic_name__ )
                    for box in input_boxes
                ]
            else:
                __snake_case : str = [
                    self._normalize_coordinates(self.target_size , __magic_name__ , __magic_name__ , is_bounding_box=__magic_name__ )
                    for box, original_size in zip(__magic_name__ , __magic_name__ )
                ]
            __snake_case : Any = np.array(__magic_name__ )
        if input_boxes is not None:
            if return_tensors == "pt":
                __snake_case : List[str] = torch.from_numpy(__magic_name__ )
                # boxes batch size of 1 by default
                __snake_case : List[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                __snake_case : List[Any] = tf.convert_to_tensor(__magic_name__ )
                # boxes batch size of 1 by default
                __snake_case : str = tf.expand_dims(__magic_name__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"""input_boxes""": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                __snake_case : Tuple = torch.from_numpy(__magic_name__ )
                # point batch size of 1 by default
                __snake_case : str = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                __snake_case : List[Any] = tf.convert_to_tensor(__magic_name__ )
                # point batch size of 1 by default
                __snake_case : int = tf.expand_dims(__magic_name__ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"""input_points""": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                __snake_case : Dict = torch.from_numpy(__magic_name__ )
                # point batch size of 1 by default
                __snake_case : int = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                __snake_case : List[Any] = tf.convert_to_tensor(__magic_name__ )
                # point batch size of 1 by default
                __snake_case : Optional[int] = tf.expand_dims(__magic_name__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"""input_labels""": input_labels} )
        return encoding_image_processor

    def lowercase__ ( self : Any , __magic_name__ : List[Any] , __magic_name__ : int ) -> Optional[int]:
        """simple docstring"""
        __snake_case : Dict = max([point.shape[0] for point in input_points] )
        __snake_case : Optional[int] = []
        for i, point in enumerate(__magic_name__ ):
            if point.shape[0] != expected_nb_points:
                __snake_case : Dict = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                __snake_case : int = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(__magic_name__ )
        __snake_case : List[Any] = processed_input_points
        return input_points, input_labels

    def lowercase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : np.ndarray , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any]=False ) -> np.ndarray:
        """simple docstring"""
        __snake_case , __snake_case : Optional[Any] = original_size
        __snake_case , __snake_case : Dict = self.image_processor._get_preprocess_shape(__magic_name__ , longest_edge=__magic_name__ )
        __snake_case : Dict = deepcopy(__magic_name__ ).astype(__magic_name__ )
        if is_bounding_box:
            __snake_case : int = coords.reshape(-1 , 2 , 2 )
        __snake_case : List[Any] = coords[..., 0] * (new_w / old_w)
        __snake_case : Optional[Any] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            __snake_case : str = coords.reshape(-1 , 4 )
        return coords

    def lowercase__ ( self : str , __magic_name__ : List[Any]=None , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , ) -> str:
        """simple docstring"""
        if input_points is not None:
            if hasattr(__magic_name__ , """numpy""" ):  # Checks for TF or Torch tensor
                __snake_case : Tuple = input_points.numpy().tolist()
            if not isinstance(__magic_name__ , __magic_name__ ) or not isinstance(input_points[0] , __magic_name__ ):
                raise ValueError("""Input points must be a list of list of floating points.""" )
            __snake_case : List[Any] = [np.array(__magic_name__ ) for input_point in input_points]
        else:
            __snake_case : Optional[int] = None
        if input_labels is not None:
            if hasattr(__magic_name__ , """numpy""" ):
                __snake_case : Any = input_labels.numpy().tolist()
            if not isinstance(__magic_name__ , __magic_name__ ) or not isinstance(input_labels[0] , __magic_name__ ):
                raise ValueError("""Input labels must be a list of list of integers.""" )
            __snake_case : Dict = [np.array(__magic_name__ ) for label in input_labels]
        else:
            __snake_case : List[Any] = None
        if input_boxes is not None:
            if hasattr(__magic_name__ , """numpy""" ):
                __snake_case : Tuple = input_boxes.numpy().tolist()
            if (
                not isinstance(__magic_name__ , __magic_name__ )
                or not isinstance(input_boxes[0] , __magic_name__ )
                or not isinstance(input_boxes[0][0] , __magic_name__ )
            ):
                raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
            __snake_case : int = [np.array(__magic_name__ ).astype(np.floataa ) for box in input_boxes]
        else:
            __snake_case : List[str] = None
        return input_points, input_labels, input_boxes

    @property
    def lowercase__ ( self : Optional[Any] ) -> Any:
        """simple docstring"""
        __snake_case : Tuple = self.image_processor.model_input_names
        return list(dict.fromkeys(__magic_name__ ) )

    def lowercase__ ( self : Any , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Dict:
        """simple docstring"""
        return self.image_processor.post_process_masks(*__magic_name__ , **__magic_name__ )
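
# Hedged usage sketch: upstream, the class obfuscated as `_A` above is SamProcessor.
# Assuming the public "facebook/sam-vit-base" checkpoint, a single point prompt is
# validated, rescaled, and batched like this:
#
#     import numpy as np
#     from PIL import Image
#     from transformers import SamProcessor
#
#     sam_processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     dummy_image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
#     sam_inputs = sam_processor(images=dummy_image, input_points=[[[320, 240]]], return_tensors="pt")
#     # sam_inputs["input_points"] now holds coordinates rescaled to the model's longest edge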
'''simple docstring'''

from collections import OrderedDict
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


__UpperCamelCase = logging.get_logger(__name__)

__UpperCamelCase = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class _A ( PretrainedConfig ):
    lowercase__: str = '''codegen'''
    lowercase__: Optional[int] = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self : Union[str, Any] , __magic_name__ : Optional[Any]=5_04_00 , __magic_name__ : Any=20_48 , __magic_name__ : List[str]=20_48 , __magic_name__ : Union[str, Any]=40_96 , __magic_name__ : Tuple=28 , __magic_name__ : Dict=16 , __magic_name__ : List[str]=64 , __magic_name__ : str=None , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Tuple=0.0 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=True , __magic_name__ : int=5_02_56 , __magic_name__ : int=5_02_56 , __magic_name__ : Any=False , **__magic_name__ : Optional[int] , ) -> int:
        """simple docstring"""
        __snake_case : List[str] = vocab_size
        __snake_case : Union[str, Any] = n_ctx
        __snake_case : int = n_positions
        __snake_case : str = n_embd
        __snake_case : Dict = n_layer
        __snake_case : List[Any] = n_head
        __snake_case : Any = n_inner
        __snake_case : str = rotary_dim
        __snake_case : List[str] = activation_function
        __snake_case : Tuple = resid_pdrop
        __snake_case : Dict = embd_pdrop
        __snake_case : int = attn_pdrop
        __snake_case : Tuple = layer_norm_epsilon
        __snake_case : Union[str, Any] = initializer_range
        __snake_case : Optional[Any] = use_cache
        __snake_case : Dict = bos_token_id
        __snake_case : Union[str, Any] = eos_token_id
        super().__init__(
            bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ )


class _A ( OnnxConfigWithPast ):
    def __init__( self : int , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , __magic_name__ : bool = False , ) -> Tuple:
        """simple docstring"""
        super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ )
        if not getattr(self._config , """pad_token_id""" , __magic_name__ ):
            # TODO: how to do that better?
            __snake_case : List[str] = 0

    @property
    def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        __snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
            __snake_case : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            __snake_case : Union[str, Any] = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def lowercase__ ( self : Tuple ) -> int:
        """simple docstring"""
        return self._config.n_layer

    @property
    def lowercase__ ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        return self._config.n_head

    def lowercase__ ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        __snake_case : Tuple = super(__magic_name__ , self ).generate_dummy_inputs(
            __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        # We need to order the input in the way they appear in the forward()
        __snake_case : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                __snake_case , __snake_case : str = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                __snake_case : Tuple = seqlen + 2
                __snake_case : Union[str, Any] = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                __snake_case : List[str] = [
                    (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
                ]
        __snake_case : Optional[int] = common_inputs["""attention_mask"""]
        if self.use_past:
            __snake_case : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
            __snake_case : Optional[Any] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
        return ordered_inputs

    @property
    def lowercase__ ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        return 13
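
# Hedged sketch: the first class above corresponds to CodeGenConfig upstream.
# Assuming that name, the attribute_map aliases shown above behave like this:
#
#     from transformers import CodeGenConfig
#
#     codegen_config = CodeGenConfig(n_embd=1024, n_layer=20, n_head=16)
#     assert codegen_config.hidden_size == codegen_config.n_embd        # alias via attribute_map
#     assert codegen_config.num_hidden_layers == codegen_config.n_layer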
'''simple docstring'''

import itertools
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig ( datasets.BuilderConfig ):
    lowercase__: Optional[datasets.Features] = None


class _A ( datasets.ArrowBasedBuilder ):
    lowercase__: str = PandasConfig

    def lowercase__ ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )

    def lowercase__ ( self : Any , __magic_name__ : str ) -> Any:
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        __snake_case : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(__magic_name__ , (str, list, tuple) ):
            __snake_case : Optional[int] = data_files
            if isinstance(__magic_name__ , __magic_name__ ):
                __snake_case : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __snake_case : Union[str, Any] = [dl_manager.iter_files(__magic_name__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        __snake_case : Dict = []
        for split_name, files in data_files.items():
            if isinstance(__magic_name__ , __magic_name__ ):
                __snake_case : Union[str, Any] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            __snake_case : str = [dl_manager.iter_files(__magic_name__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) )
        return splits

    def lowercase__ ( self : Dict , __magic_name__ : pa.Table ) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            __snake_case : Tuple = table_cast(__magic_name__ , self.config.features.arrow_schema )
        return pa_table

    def lowercase__ ( self : Dict , __magic_name__ : List[str] ) -> Tuple:
        """simple docstring"""
        for i, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
            with open(__magic_name__ , """rb""" ) as f:
                __snake_case : Optional[int] = pa.Table.from_pandas(pd.read_pickle(__magic_name__ ) )
            yield i, self._cast_table(__magic_name__ )
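
# Hedged usage sketch: this builder backs the packaged "pandas" loader in
# datasets, which reads DataFrames pickled with DataFrame.to_pickle; the file
# name below is an assumption for illustration.
#
#     import pandas as pd
#     from datasets import load_dataset
#
#     pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
#     train_ds = load_dataset("pandas", data_files={"train": "train.pkl"})["train"]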
'''simple docstring'''

import gc
import random
import unittest
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class _A ( PipelineTesterMixin , unittest.TestCase ):
    lowercase__: int = KandinskyImgaImgPipeline
    lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
    lowercase__: int = [
        '''prompt''',
        '''negative_prompt''',
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    lowercase__: List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    lowercase__: Any = False

    @property
    def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        return 32

    @property
    def lowercase__ ( self : str ) -> str:
        """simple docstring"""
        return 32

    @property
    def lowercase__ ( self : Tuple ) -> Any:
        """simple docstring"""
        return self.time_input_dim

    @property
    def lowercase__ ( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def lowercase__ ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        return 1_00

    @property
    def lowercase__ ( self : List[str] ) -> List[str]:
        """simple docstring"""
        __snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer

    @property
    def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        __snake_case : int = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        __snake_case : Tuple = MultilingualCLIP(__magic_name__ )
        __snake_case : Optional[Any] = text_encoder.eval()
        return text_encoder

    @property
    def lowercase__ ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        torch.manual_seed(0 )
        __snake_case : int = {
            """in_channels""": 4,
            # Out channels is double in channels because it predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        __snake_case : Tuple = UNetaDConditionModel(**__magic_name__ )
        return model

    @property
    def lowercase__ ( self : str ) -> Dict:
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def lowercase__ ( self : Optional[Any] ) -> int:
        """simple docstring"""
        torch.manual_seed(0 )
        __snake_case : int = VQModel(**self.dummy_movq_kwargs )
        return model

    def lowercase__ ( self : Tuple ) -> str:
        """simple docstring"""
        __snake_case : Tuple = self.dummy_text_encoder
        __snake_case : Dict = self.dummy_tokenizer
        __snake_case : Dict = self.dummy_unet
        __snake_case : int = self.dummy_movq
        __snake_case : List[Any] = {
            """num_train_timesteps""": 10_00,
            """beta_schedule""": """linear""",
            """beta_start""": 0.00085,
            """beta_end""": 0.012,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        __snake_case : Dict = DDIMScheduler(**__magic_name__ )
        __snake_case : Any = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str:
        """simple docstring"""
        __snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
        __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ )
        # create init_image
        __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
        __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __snake_case : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
        if str(__magic_name__ ).startswith("""mps""" ):
            __snake_case : str = torch.manual_seed(__magic_name__ )
        else:
            __snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
        __snake_case : Optional[Any] = {
            """prompt""": """horse""",
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 10,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs

    def lowercase__ ( self : int ) -> str:
        """simple docstring"""
        __snake_case : Dict = """cpu"""
        __snake_case : Union[str, Any] = self.get_dummy_components()
        __snake_case : List[str] = self.pipeline_class(**__magic_name__ )
        __snake_case : Optional[Any] = pipe.to(__magic_name__ )
        pipe.set_progress_bar_config(disable=__magic_name__ )
        __snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) )
        __snake_case : List[str] = output.images
        __snake_case : Any = pipe(
            **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
        __snake_case : Optional[int] = image[0, -3:, -3:, -1]
        __snake_case : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __snake_case : int = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''


@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    def lowercase__ ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__ ( self : Optional[int] ) -> str:
        """simple docstring"""
        __snake_case : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_img2img_frog.npy""" )
        __snake_case : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/cat.png""" )
        __snake_case : List[Any] = """A red cartoon frog, 4k"""
        __snake_case : str = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__magic_name__ )
        __snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
        __snake_case : Any = pipeline.to(__magic_name__ )
        pipeline.set_progress_bar_config(disable=__magic_name__ )
        __snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __snake_case , __snake_case : Optional[Any] = pipe_prior(
            __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __snake_case : List[str] = pipeline(
            __magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
        __snake_case : Dict = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
'''simple docstring'''

import logging
import os
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


__UpperCamelCase = logging.getLogger(__name__)


class _A ( RagRetriever ):
    def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=None ) -> int:
        """simple docstring"""
        super().__init__(
            __magic_name__ , question_encoder_tokenizer=__magic_name__ , generator_tokenizer=__magic_name__ , index=__magic_name__ , init_retrieval=__magic_name__ , )
        __snake_case : List[str] = None

    def lowercase__ ( self : int , __magic_name__ : int ) -> List[str]:
        """simple docstring"""
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            __snake_case : List[Any] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            __snake_case : List[str] = str(distributed_port + 1 )
            __snake_case : Any = dist.new_group(ranks=__magic_name__ , backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )

    def lowercase__ ( self : int ) -> int:
        """simple docstring"""
        return dist.get_rank(group=self.process_group ) == 0

    def lowercase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=torch.floataa ) -> List[str]:
        """simple docstring"""
        __snake_case : Optional[int] = torch.empty(__magic_name__ , dtype=__magic_name__ )
        dist.scatter(__magic_name__ , src=0 , scatter_list=__magic_name__ , group=self.process_group )
        return target_tensor

    def lowercase__ ( self : Optional[int] ) -> Any:
        """simple docstring"""
        __snake_case : int = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        __snake_case : Union[str, Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , __magic_name__ )
        return ifname

    def lowercase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : int ) -> Tuple[np.ndarray, List[dict]]:
        """simple docstring"""
        if not dist.is_initialized():
            __snake_case , __snake_case : List[Any] = self._main_retrieve(__magic_name__ , __magic_name__ )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__magic_name__ )
        # distributed training
        __snake_case : Union[str, Any] = dist.get_world_size(group=self.process_group )
        # gather logic
        __snake_case : Tuple = None
        if self._is_main():
            __snake_case : Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__magic_name__ )]
        dist.gather(torch.tensor(__magic_name__ ) , dst=0 , gather_list=__magic_name__ , group=self.process_group )
        # scatter logic
        __snake_case : Optional[int] = question_hidden_states.shape[0]
        __snake_case : Optional[Any] = []
        __snake_case : Any = []
        if self._is_main():
            assert len(__magic_name__ ) == world_size
            __snake_case , __snake_case : Optional[int] = self._main_retrieve(torch.cat(__magic_name__ ).numpy() , __magic_name__ )
            __snake_case , __snake_case : Tuple = torch.tensor(__magic_name__ ), torch.tensor(__magic_name__ )
            __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
            __snake_case : Any = self._chunk_tensor(__magic_name__ , __magic_name__ )
        __snake_case : Optional[Any] = self._scattered(__magic_name__ , [n_queries, n_docs] , target_type=torch.intaa )
        __snake_case : Any = self._scattered(__magic_name__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__magic_name__ )
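
# Hedged standalone sketch of the gather/retrieve/scatter shape logic above, using
# plain tensors so no process group is required (all sizes below are made up):
#
#     import torch
#
#     world_size, n_queries, n_docs, dim = 2, 3, 5, 8
#     gathered = torch.cat([torch.randn(n_queries, dim) for _ in range(world_size)])  # rank-0 gather
#     doc_ids = torch.randint(0, 100, (gathered.shape[0], n_docs))                    # one retrieval pass
#     scatter_list = list(doc_ids.chunk(world_size))                                  # per-rank chunks
#     assert scatter_list[0].shape == (n_queries, n_docs)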
'''simple docstring'''

import json
from typing import Any, Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


__UpperCamelCase = logging.get_logger(__name__)

__UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
__UpperCamelCase = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

__UpperCamelCase = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class _A ( PreTrainedTokenizerFast ):
    lowercase__: Any = VOCAB_FILES_NAMES
    lowercase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowercase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase__: Optional[Any] = ['''input_ids''', '''attention_mask''']
    lowercase__: List[str] = BartTokenizer

    def __init__( self : Union[str, Any] , __magic_name__ : int=None , __magic_name__ : Tuple=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="replace" , __magic_name__ : int="<s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Union[str, Any]="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Tuple , ) -> List[str]:
        """simple docstring"""
        super().__init__(
            __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
        __snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
            __snake_case : str = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) )
            __snake_case : str = add_prefix_space
            __snake_case : Union[str, Any] = pre_tok_class(**__magic_name__ )
        __snake_case : str = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        __snake_case : Any = """post_processor"""
        __snake_case : Any = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
        if tokenizer_component_instance:
            __snake_case : str = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                __snake_case : Tuple = tuple(state["""sep"""] )
            if "cls" in state:
                __snake_case : int = tuple(state["""cls"""] )
            __snake_case : Optional[int] = False
            if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
                __snake_case : Optional[Any] = add_prefix_space
                __snake_case : List[str] = True
            if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets:
                __snake_case : Optional[int] = trim_offsets
                __snake_case : Any = True
            if changes_to_apply:
                __snake_case : str = getattr(__magic_name__ , state.pop("""type""" ) )
                __snake_case : List[Any] = component_class(**__magic_name__ )
                setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )

    @property
    def lowercase__ ( self : List[Any] ) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        __snake_case : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
        __snake_case : Union[str, Any] = value

    def lowercase__ ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Tuple ) -> BatchEncoding:
        """simple docstring"""
        __snake_case : Union[str, Any] = kwargs.get("""is_split_into_words""" , __magic_name__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )

    def lowercase__ ( self : Dict , *__magic_name__ : Optional[int] , **__magic_name__ : List[Any] ) -> BatchEncoding:
        """simple docstring"""
        __snake_case : Optional[Any] = kwargs.get("""is_split_into_words""" , __magic_name__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*__magic_name__ , **__magic_name__ )

    def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        __snake_case : List[str] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
        return tuple(__magic_name__ )

    def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=None ) -> Optional[Any]:
        """simple docstring"""
        __snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        __snake_case : Optional[int] = [self.sep_token_id]
        __snake_case : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
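
# Hedged usage sketch: upstream this class is BartTokenizerFast. Assuming the
# public "facebook/bart-base" checkpoint, the <s> ... </s> layout built by the
# build_inputs_with_special_tokens logic above shows up as:
#
#     from transformers import BartTokenizerFast
#
#     bart_tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     encoded = bart_tok("hello world")["input_ids"]
#     assert encoded[0] == bart_tok.bos_token_id and encoded[-1] == bart_tok.eos_token_id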
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__UpperCamelCase = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCamelCase = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    __UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
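
# Hedged note: _LazyModule defers the heavy torch import until an attribute is
# first touched, so importing the names below stays cheap until instantiation;
# the tiny config values are made up for illustration.
#
#     from transformers import BloomConfig, BloomModel  # resolved lazily via _LazyModule
#
#     tiny_bloom = BloomModel(BloomConfig(hidden_size=64, n_layer=2, n_head=8))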
13
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # Rewire every node (including If/Loop subgraphs) to use the kept duplicate.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Removes duplicate initializers from an ONNX model and saves an optimized copy next to it."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}  # kept for inspection: maps a kept initializer name to its removed duplicates
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
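A hedged usage sketch; "model.onnx" is a placeholder path, and the optimized copy is written into the same folder with an "optimized_" prefix:

# Hypothetical invocation of the helper above.
optimized_path = remove_dup_initializers("model.onnx")
optimized = onnx.load(optimized_path)
print("initializers after dedup:", len(optimized.graph.initializer))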
13
1
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the hidden states of the base transformer as features."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first output tensor, i.e. the last hidden state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
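A hedged usage sketch through the `pipeline` factory; the checkpoint name is only an example:

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")  # example checkpoint
features = extractor("This is a test")
# Nested list of shape [batch, sequence_length, hidden_size]:
print(len(features[0]), len(features[0][0]))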
13
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # Rename the LM-head weight key so the checkpoint matches the transformers layout.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
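A hedged sketch of loading one of the converted folders afterwards: DialoGPT reuses the GPT-2 architecture, so a GPT-2 head model can consume the saved state dict (the config source and paths here are assumptions, not part of the script):

import torch
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config.from_pretrained("microsoft/DialoGPT-small")  # assumed matching config
model = GPT2LMHeadModel(config)
model.load_state_dict(torch.load("./DialoGPT-small/pytorch_model.bin"))  # WEIGHTS_NAME default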
13
1
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
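An example command line for the script above; the script file name and both paths are placeholders:

python convert_xlm_checkpoint.py \
    --xlm_checkpoint_path ./xlm_checkpoint.pth \
    --pytorch_dump_folder_path ./xlm-converted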
13
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            # Preserve the case of the original symbol.
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through unchanged.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
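A quick hedged check of the helpers above: encryption followed by decryption with the same key restores the original message, and non-letter characters pass through untouched (key and message are example values):

key = "LION"
plaintext = "Attack at dawn!"

ciphertext = encrypt_message(key, plaintext)
assert decrypt_message(key, ciphertext) == plaintext
assert ciphertext.count(" ") == plaintext.count(" ")  # spaces are preserved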
13
1